Total coverage: 210767 (12%) of 1827205
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Checksumming functions for IP, TCP, UDP and so on * * Authors: Jorge Cwik, <jorge@laser.satlink.net> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Borrows very liberally from tcp.c and ip.c, see those * files for more names. */ #ifndef _CHECKSUM_H #define _CHECKSUM_H #include <linux/errno.h> #include <asm/types.h> #include <asm/byteorder.h> #include <asm/checksum.h> #if !defined(_HAVE_ARCH_COPY_AND_CSUM_FROM_USER) || !defined(HAVE_CSUM_COPY_USER) #include <linux/uaccess.h> #endif #ifndef _HAVE_ARCH_COPY_AND_CSUM_FROM_USER static __always_inline __wsum csum_and_copy_from_user (const void __user *src, void *dst, int len) { if (copy_from_user(dst, src, len)) return 0; return csum_partial(dst, len, ~0U); } #endif #ifndef HAVE_CSUM_COPY_USER static __always_inline __wsum csum_and_copy_to_user (const void *src, void __user *dst, int len) { __wsum sum = csum_partial(src, len, ~0U); if (copy_to_user(dst, src, len) == 0) return sum; return 0; } #endif #ifndef _HAVE_ARCH_CSUM_AND_COPY static __always_inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len) { memcpy(dst, src, len); return csum_partial(dst, len, 0); } #endif #ifndef HAVE_ARCH_CSUM_ADD static __always_inline __wsum csum_add(__wsum csum, __wsum addend) { u32 res = (__force u32)csum; res += (__force u32)addend; return (__force __wsum)(res + (res < (__force u32)addend)); } #endif static __always_inline __wsum csum_sub(__wsum csum, __wsum addend) { return csum_add(csum, ~addend); } static __always_inline __sum16 csum16_add(__sum16 csum, __be16 addend) { u16 res = (__force u16)csum; res += (__force u16)addend; return (__force __sum16)(res + (res < (__force u16)addend)); } static __always_inline __sum16 csum16_sub(__sum16 csum, __be16 addend) { return csum16_add(csum, ~addend); } #ifndef HAVE_ARCH_CSUM_SHIFT static __always_inline __wsum csum_shift(__wsum sum, int offset) { /* rotate sum to align it with a 16b boundary */ if (offset & 1) return (__force __wsum)ror32((__force u32)sum, 8); return sum; } #endif static __always_inline __wsum csum_block_add(__wsum csum, __wsum csum2, int offset) { return csum_add(csum, csum_shift(csum2, offset)); } static __always_inline __wsum csum_block_add_ext(__wsum csum, __wsum csum2, int offset, int len) { return csum_block_add(csum, csum2, offset); } static __always_inline __wsum csum_block_sub(__wsum csum, __wsum csum2, int offset) { return csum_block_add(csum, ~csum2, offset); } static __always_inline __wsum csum_unfold(__sum16 n) { return (__force __wsum)n; } static __always_inline __wsum csum_partial_ext(const void *buff, int len, __wsum sum) {
return csum_partial(buff, len, sum); } #define CSUM_MANGLED_0 ((__force __sum16)0xffff) static __always_inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) { *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); } static __always_inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) { __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); *sum = csum_fold(csum_add(tmp, (__force __wsum)to)); } /* Implements RFC 1624 (Incremental Internet Checksum) * 3. Discussion states : * HC' = ~(~HC + ~m + m') * m : old value of a 16bit field * m' : new value of a 16bit field */ static __always_inline void csum_replace2(__sum16 *sum, __be16 old, __be16 new) { *sum = ~csum16_add(csum16_sub(~(*sum), old), new); } static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) { *csum = csum_add(csum_sub(*csum, old), new); } static inline unsigned short csum_from32to16(unsigned int sum) { sum += (sum >> 16) | (sum << 16); return (unsigned short)(sum >> 16); } struct sk_buff; void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr); void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb, const __be32 *from, const __be32 *to, bool pseudohdr); void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb, __wsum diff, bool pseudohdr); static __always_inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb, __be16 from, __be16 to, bool pseudohdr) { inet_proto_csum_replace4(sum, skb, (__force __be32)from, (__force __be32)to, pseudohdr); } static __always_inline __wsum remcsum_adjust(void *ptr, __wsum csum, int start, int offset) { __sum16 *psum = (__sum16 *)(ptr + offset); __wsum delta; /* Subtract out checksum up to start */ csum = csum_sub(csum, csum_partial(ptr, start, 0)); /* Set derived checksum in packet */ delta = csum_sub((__force __wsum)csum_fold(csum), (__force __wsum)*psum); *psum = csum_fold(csum); return delta; } static __always_inline void remcsum_unadjust(__sum16 *psum, __wsum delta) { *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum)); } static __always_inline __wsum wsum_negate(__wsum val) { return (__force __wsum)-((__force u32)val); } #endif
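For reference, the RFC 1624 update rule that csum_replace2() above implements, HC' = ~(~HC + ~m + m'), can be exercised in plain userspace C. The sketch below is illustrative only and uses nothing from the kernel headers; fold32(), csum16() and csum16_update() are hypothetical helper names and the sample words are arbitrary.

/*
 * Illustrative userspace sketch of the RFC 1624 rule used by
 * csum_replace2() above: HC' = ~(~HC + ~m + m'). The helper names and
 * sample data are made up for the example; this is not kernel code.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <assert.h>

/* Fold a 32-bit ones'-complement accumulator into 16 bits. */
static uint16_t fold32(uint32_t sum)
{
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* Full ones'-complement checksum over an array of 16-bit words. */
static uint16_t csum16(const uint16_t *words, size_t n)
{
        uint32_t sum = 0;

        for (size_t i = 0; i < n; i++)
                sum += words[i];
        return (uint16_t)~fold32(sum);
}

/* Incremental update when a single 16-bit field changes from old to new. */
static uint16_t csum16_update(uint16_t check, uint16_t old, uint16_t new)
{
        uint32_t sum = (uint16_t)~check;        /* ~HC */

        sum += (uint16_t)~old;                  /* + ~m */
        sum += new;                             /* + m' */
        return (uint16_t)~fold32(sum);          /* ~( ... ) */
}

int main(void)
{
        uint16_t words[4] = { 0x4500, 0x0054, 0xbeef, 0x0001 };
        uint16_t check = csum16(words, 4);

        words[2] = 0xcafe;                      /* modify one field */
        uint16_t incremental = csum16_update(check, 0xbeef, 0xcafe);

        /* The incremental result matches a full recomputation. */
        assert(incremental == csum16(words, 4));
        printf("updated checksum: 0x%04x\n", incremental);
        return 0;
}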
// SPDX-License-Identifier: GPL-2.0-or-later /* * Fast Userspace Mutexes (which I call "Futexes!"). * (C) Rusty Russell, IBM 2002 * * Generalized futexes, futex requeueing, misc fixes by Ingo Molnar * (C) Copyright 2003 Red Hat Inc, All Rights Reserved * * Removed page pinning, fix privately mapped COW pages and other cleanups * (C) Copyright 2003, 2004 Jamie Lokier * * Robust futex support started by Ingo Molnar * (C) Copyright 2006 Red Hat Inc, All Rights Reserved * Thanks to Thomas Gleixner for suggestions, analysis and fixes. * * PI-futex support started by Ingo Molnar and Thomas Gleixner * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * * PRIVATE futexes by Eric Dumazet * Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com> * * Requeue-PI support by Darren Hart <dvhltc@us.ibm.com> * Copyright (C) IBM Corporation, 2009 * Thanks to Thomas Gleixner for conceptual design and careful reviews. * * Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly * enough at me, Linus for the original (flawed) idea, Matthew * Kirkwood for proof-of-concept implementation. * * "The futexes are also cursed." * "But they come in a choice of three flavours!" */ #include <linux/compat.h> #include <linux/jhash.h> #include <linux/pagemap.h> #include <linux/debugfs.h> #include <linux/plist.h> #include <linux/memblock.h> #include <linux/fault-inject.h> #include <linux/slab.h> #include "futex.h" #include "../locking/rtmutex_common.h" /* * The base of the bucket array and its size are always used together * (after initialization only in futex_hash()), so ensure that they * reside in the same cacheline. */ static struct { struct futex_hash_bucket *queues; unsigned long hashsize; } __futex_data __read_mostly __aligned(2*sizeof(long)); #define futex_queues (__futex_data.queues) #define futex_hashsize (__futex_data.hashsize) /* * Fault injections for futexes.
*/ #ifdef CONFIG_FAIL_FUTEX static struct { struct fault_attr attr; bool ignore_private; } fail_futex = { .attr = FAULT_ATTR_INITIALIZER, .ignore_private = false, }; static int __init setup_fail_futex(char *str) { return setup_fault_attr(&fail_futex.attr, str); } __setup("fail_futex=", setup_fail_futex); bool should_fail_futex(bool fshared) { if (fail_futex.ignore_private && !fshared) return false; return should_fail(&fail_futex.attr, 1); } #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS static int __init fail_futex_debugfs(void) { umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; struct dentry *dir; dir = fault_create_debugfs_attr("fail_futex", NULL, &fail_futex.attr); if (IS_ERR(dir)) return PTR_ERR(dir); debugfs_create_bool("ignore-private", mode, dir, &fail_futex.ignore_private); return 0; } late_initcall(fail_futex_debugfs); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ #endif /* CONFIG_FAIL_FUTEX */ /** * futex_hash - Return the hash bucket in the global hash * @key: Pointer to the futex key for which the hash is calculated * * We hash on the keys returned from get_futex_key (see below) and return the * corresponding hash bucket in the global hash. */ struct futex_hash_bucket *futex_hash(union futex_key *key) { u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4, key->both.offset); return &futex_queues[hash & (futex_hashsize - 1)]; } /** * futex_setup_timer - set up the sleeping hrtimer. * @time: ptr to the given timeout value * @timeout: the hrtimer_sleeper structure to be set up * @flags: futex flags * @range_ns: optional range in ns * * Return: Initialized hrtimer_sleeper structure or NULL if no timeout * value given */ struct hrtimer_sleeper * futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout, int flags, u64 range_ns) { if (!time) return NULL; hrtimer_setup_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ? CLOCK_REALTIME : CLOCK_MONOTONIC, HRTIMER_MODE_ABS); /* * If range_ns is 0, calling hrtimer_set_expires_range_ns() is * effectively the same as calling hrtimer_set_expires(). */ hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns); return timeout; } /* * Generate a machine wide unique identifier for this inode. * * This relies on u64 not wrapping in the life-time of the machine; which with * 1ns resolution means almost 585 years. * * This further relies on the fact that a well formed program will not unmap * the file while it has a (shared) futex waiting on it. This mapping will have * a file reference which pins the mount and inode. * * If for some reason an inode gets evicted and read back in again, it will get * a new sequence number and will _NOT_ match, even though it is the exact same * file. * * It is important that futex_match() will never have a false-positive, esp. * for PI futexes that can mess up the state. The above argues that false-negatives * are only possible for malformed programs. */ static u64 get_inode_sequence_number(struct inode *inode) { static atomic64_t i_seq; u64 old; /* Does the inode already have a sequence number? */ old = atomic64_read(&inode->i_sequence); if (likely(old)) return old; for (;;) { u64 new = atomic64_inc_return(&i_seq); if (WARN_ON_ONCE(!new)) continue; old = 0; if (!atomic64_try_cmpxchg_relaxed(&inode->i_sequence, &old, new)) return old; return new; } } /** * get_futex_key() - Get parameters which are the keys for a futex * @uaddr: virtual address of the futex * @flags: FLAGS_* * @key: address where result is stored. 
* @rw: mapping needs to be read/write (values: FUTEX_READ, * FUTEX_WRITE) * * Return: a negative error code or 0 * * The key words are stored in @key on success. * * For shared mappings (when @fshared), the key is: * * ( inode->i_sequence, page->index, offset_within_page ) * * [ also see get_inode_sequence_number() ] * * For private mappings (or when !@fshared), the key is: * * ( current->mm, address, 0 ) * * This allows (cross process, where applicable) identification of the futex * without keeping the page pinned for the duration of the FUTEX_WAIT. * * lock_page() might sleep, the caller should not hold a spinlock. */ int get_futex_key(u32 __user *uaddr, unsigned int flags, union futex_key *key, enum futex_access rw) { unsigned long address = (unsigned long)uaddr; struct mm_struct *mm = current->mm; struct page *page; struct folio *folio; struct address_space *mapping; int err, ro = 0; bool fshared; fshared = flags & FLAGS_SHARED; /* * The futex address must be "naturally" aligned. */ key->both.offset = address % PAGE_SIZE; if (unlikely((address % sizeof(u32)) != 0)) return -EINVAL; address -= key->both.offset; if (unlikely(!access_ok(uaddr, sizeof(u32)))) return -EFAULT; if (unlikely(should_fail_futex(fshared))) return -EFAULT; /* * PROCESS_PRIVATE futexes are fast. * As the mm cannot disappear under us and the 'key' only needs * virtual address, we dont even have to find the underlying vma. * Note : We do have to check 'uaddr' is a valid user address, * but access_ok() should be faster than find_vma() */ if (!fshared) { /* * On no-MMU, shared futexes are treated as private, therefore * we must not include the current process in the key. Since * there is only one address space, the address is a unique key * on its own. */ if (IS_ENABLED(CONFIG_MMU)) key->private.mm = mm; else key->private.mm = NULL; key->private.address = address; return 0; } again: /* Ignore any VERIFY_READ mapping (futex common case) */ if (unlikely(should_fail_futex(true))) return -EFAULT; err = get_user_pages_fast(address, 1, FOLL_WRITE, &page); /* * If write access is not required (eg. FUTEX_WAIT), try * and get read-only access. */ if (err == -EFAULT && rw == FUTEX_READ) { err = get_user_pages_fast(address, 1, 0, &page); ro = 1; } if (err < 0) return err; else err = 0; /* * The treatment of mapping from this point on is critical. The folio * lock protects many things but in this context the folio lock * stabilizes mapping, prevents inode freeing in the shared * file-backed region case and guards against movement to swap cache. * * Strictly speaking the folio lock is not needed in all cases being * considered here and folio lock forces unnecessarily serialization. * From this point on, mapping will be re-verified if necessary and * folio lock will be acquired only if it is unavoidable * * Mapping checks require the folio so it is looked up now. For * anonymous pages, it does not matter if the folio is split * in the future as the key is based on the address. For * filesystem-backed pages, the precise page is required as the * index of the page determines the key. 
*/ folio = page_folio(page); mapping = READ_ONCE(folio->mapping); /* * If folio->mapping is NULL, then it cannot be an anonymous * page; but it might be the ZERO_PAGE or in the gate area or * in a special mapping (all cases which we are happy to fail); * or it may have been a good file page when get_user_pages_fast * found it, but truncated or holepunched or subjected to * invalidate_complete_page2 before we got the folio lock (also * cases which we are happy to fail). And we hold a reference, * so refcount care in invalidate_inode_page's remove_mapping * prevents drop_caches from setting mapping to NULL beneath us. * * The case we do have to guard against is when memory pressure made * shmem_writepage move it from filecache to swapcache beneath us: * an unlikely race, but we do need to retry for folio->mapping. */ if (unlikely(!mapping)) { int shmem_swizzled; /* * Folio lock is required to identify which special case above * applies. If this is really a shmem page then the folio lock * will prevent unexpected transitions. */ folio_lock(folio); shmem_swizzled = folio_test_swapcache(folio) || folio->mapping; folio_unlock(folio); folio_put(folio); if (shmem_swizzled) goto again; return -EFAULT; } /* * Private mappings are handled in a simple way. * * If the futex key is stored in anonymous memory, then the associated * object is the mm which is implicitly pinned by the calling process. * * NOTE: When userspace waits on a MAP_SHARED mapping, even if * it's a read-only handle, it's expected that futexes attach to * the object not the particular process. */ if (folio_test_anon(folio)) { /* * A RO anonymous page will never change and thus doesn't make * sense for futex operations. */ if (unlikely(should_fail_futex(true)) || ro) { err = -EFAULT; goto out; } key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */ key->private.mm = mm; key->private.address = address; } else { struct inode *inode; /* * The associated futex object in this case is the inode and * the folio->mapping must be traversed. Ordinarily this should * be stabilised under folio lock but it's not strictly * necessary in this case as we just want to pin the inode, not * update i_pages or anything like that. * * The RCU read lock is taken as the inode is finally freed * under RCU. If the mapping still matches expectations then the * mapping->host can be safely accessed as being a valid inode. */ rcu_read_lock(); if (READ_ONCE(folio->mapping) != mapping) { rcu_read_unlock(); folio_put(folio); goto again; } inode = READ_ONCE(mapping->host); if (!inode) { rcu_read_unlock(); folio_put(folio); goto again; } key->both.offset |= FUT_OFF_INODE; /* inode-based key */ key->shared.i_seq = get_inode_sequence_number(inode); key->shared.pgoff = page_pgoff(folio, page); rcu_read_unlock(); } out: folio_put(folio); return err; } /** * fault_in_user_writeable() - Fault in user address and verify RW access * @uaddr: pointer to faulting user space address * * Slow path to fixup the fault we just took in the atomic write * access to @uaddr. * * We have no generic implementation of a non-destructive write to the * user address. We know that we faulted in the atomic pagefault * disabled section so we can as well avoid the #PF overhead by * calling get_user_pages() right away. */ int fault_in_user_writeable(u32 __user *uaddr) { struct mm_struct *mm = current->mm; int ret; mmap_read_lock(mm); ret = fixup_user_fault(mm, (unsigned long)uaddr, FAULT_FLAG_WRITE, NULL); mmap_read_unlock(mm); return ret < 0 ? 
ret : 0; } /** * futex_top_waiter() - Return the highest priority waiter on a futex * @hb: the hash bucket the futex_q's reside in * @key: the futex key (to distinguish it from other futex futex_q's) * * Must be called with the hb lock held. */ struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, union futex_key *key) { struct futex_q *this; plist_for_each_entry(this, &hb->chain, list) { if (futex_match(&this->key, key)) return this; } return NULL; } /** * wait_for_owner_exiting - Block until the owner has exited * @ret: owner's current futex lock status * @exiting: Pointer to the exiting task * * Caller must hold a refcount on @exiting. */ void wait_for_owner_exiting(int ret, struct task_struct *exiting) { if (ret != -EBUSY) { WARN_ON_ONCE(exiting); return; } if (WARN_ON_ONCE(ret == -EBUSY && !exiting)) return; mutex_lock(&exiting->futex_exit_mutex); /* * No point in doing state checking here. If the waiter got here * while the task was in exec()->exec_futex_release() then it can * have any FUTEX_STATE_* value when the waiter has acquired the * mutex. OK, if running, EXITING or DEAD if it reached exit() * already. Highly unlikely and not a problem. Just one more round * through the futex maze. */ mutex_unlock(&exiting->futex_exit_mutex); put_task_struct(exiting); } /** * __futex_unqueue() - Remove the futex_q from its futex_hash_bucket * @q: The futex_q to unqueue * * The q->lock_ptr must not be NULL and must be held by the caller. */ void __futex_unqueue(struct futex_q *q) { struct futex_hash_bucket *hb; if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) return; lockdep_assert_held(q->lock_ptr); hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); plist_del(&q->list, &hb->chain); futex_hb_waiters_dec(hb); } /* The key must be already stored in q->key. */ struct futex_hash_bucket *futex_q_lock(struct futex_q *q) __acquires(&hb->lock) { struct futex_hash_bucket *hb; hb = futex_hash(&q->key); /* * Increment the counter before taking the lock so that * a potential waker won't miss a to-be-slept task that is * waiting for the spinlock. This is safe as all futex_q_lock() * users end up calling futex_queue(). Similarly, for housekeeping, * decrement the counter at futex_q_unlock() when some error has * occurred and we don't end up adding the task to the list. */ futex_hb_waiters_inc(hb); /* implies smp_mb(); (A) */ q->lock_ptr = &hb->lock; spin_lock(&hb->lock); return hb; } void futex_q_unlock(struct futex_hash_bucket *hb) __releases(&hb->lock) { spin_unlock(&hb->lock); futex_hb_waiters_dec(hb); } void __futex_queue(struct futex_q *q, struct futex_hash_bucket *hb) { int prio; /* * The priority used to register this element is * - either the real thread-priority for the real-time threads * (i.e. threads with a priority lower than MAX_RT_PRIO) * - or MAX_RT_PRIO for non-RT threads. * Thus, all RT-threads are woken first in priority order, and * the others are woken last, in FIFO order. */ prio = min(current->normal_prio, MAX_RT_PRIO); plist_node_init(&q->list, prio); plist_add(&q->list, &hb->chain); q->task = current; } /** * futex_unqueue() - Remove the futex_q from its futex_hash_bucket * @q: The futex_q to unqueue * * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must * be paired with exactly one earlier call to futex_queue(). 
* * Return: * - 1 - if the futex_q was still queued (and we removed unqueued it); * - 0 - if the futex_q was already removed by the waking thread */ int futex_unqueue(struct futex_q *q) { spinlock_t *lock_ptr; int ret = 0; /* In the common case we don't take the spinlock, which is nice. */ retry: /* * q->lock_ptr can change between this read and the following spin_lock. * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and * optimizing lock_ptr out of the logic below. */ lock_ptr = READ_ONCE(q->lock_ptr); if (lock_ptr != NULL) { spin_lock(lock_ptr); /* * q->lock_ptr can change between reading it and * spin_lock(), causing us to take the wrong lock. This * corrects the race condition. * * Reasoning goes like this: if we have the wrong lock, * q->lock_ptr must have changed (maybe several times) * between reading it and the spin_lock(). It can * change again after the spin_lock() but only if it was * already changed before the spin_lock(). It cannot, * however, change back to the original value. Therefore * we can detect whether we acquired the correct lock. */ if (unlikely(lock_ptr != q->lock_ptr)) { spin_unlock(lock_ptr); goto retry; } __futex_unqueue(q); BUG_ON(q->pi_state); spin_unlock(lock_ptr); ret = 1; } return ret; } /* * PI futexes can not be requeued and must remove themselves from the hash * bucket. The hash bucket lock (i.e. lock_ptr) is held. */ void futex_unqueue_pi(struct futex_q *q) { /* * If the lock was not acquired (due to timeout or signal) then the * rt_waiter is removed before futex_q is. If this is observed by * an unlocker after dropping the rtmutex wait lock and before * acquiring the hash bucket lock, then the unlocker dequeues the * futex_q from the hash bucket list to guarantee consistent state * vs. userspace. Therefore the dequeue here must be conditional. */ if (!plist_node_empty(&q->list)) __futex_unqueue(q); BUG_ON(!q->pi_state); put_pi_state(q->pi_state); q->pi_state = NULL; } /* Constants for the pending_op argument of handle_futex_death */ #define HANDLE_DEATH_PENDING true #define HANDLE_DEATH_LIST false /* * Process a futex-list entry, check whether it's owned by the * dying task, and do notification if so: */ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, bool pi, bool pending_op) { u32 uval, nval, mval; pid_t owner; int err; /* Futex address must be 32bit aligned */ if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0) return -1; retry: if (get_user(uval, uaddr)) return -1; /* * Special case for regular (non PI) futexes. The unlock path in * user space has two race scenarios: * * 1. The unlock path releases the user space futex value and * before it can execute the futex() syscall to wake up * waiters it is killed. * * 2. A woken up waiter is killed before it can acquire the * futex in user space. * * In the second case, the wake up notification could be generated * by the unlock path in user space after setting the futex value * to zero or by the kernel after setting the OWNER_DIED bit below. * * In both cases the TID validation below prevents a wakeup of * potential waiters which can cause these waiters to block * forever. * * In both cases the following conditions are met: * * 1) task->robust_list->list_op_pending != NULL * @pending_op == true * 2) The owner part of user space futex value == 0 * 3) Regular futex: @pi == false * * If these conditions are met, it is safe to attempt waking up a * potential waiter without touching the user space futex value and * trying to set the OWNER_DIED bit. 
If the futex value is zero, * the rest of the user space mutex state is consistent, so a woken * waiter will just take over the uncontended futex. Setting the * OWNER_DIED bit would create inconsistent state and malfunction * of the user space owner died handling. Otherwise, the OWNER_DIED * bit is already set, and the woken waiter is expected to deal with * this. */ owner = uval & FUTEX_TID_MASK; if (pending_op && !pi && !owner) { futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1, FUTEX_BITSET_MATCH_ANY); return 0; } if (owner != task_pid_vnr(curr)) return 0; /* * Ok, this dying thread is truly holding a futex * of interest. Set the OWNER_DIED bit atomically * via cmpxchg, and if the value had FUTEX_WAITERS * set, wake up a waiter (if any). (We have to do a * futex_wake() even if OWNER_DIED is already set - * to handle the rare but possible case of recursive * thread-death.) The rest of the cleanup is done in * userspace. */ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; /* * We are not holding a lock here, but we want to have * the pagefault_disable/enable() protection because * we want to handle the fault gracefully. If the * access fails we try to fault in the futex with R/W * verification via get_user_pages. get_user() above * does not guarantee R/W access. If that fails we * give up and leave the futex locked. */ if ((err = futex_cmpxchg_value_locked(&nval, uaddr, uval, mval))) { switch (err) { case -EFAULT: if (fault_in_user_writeable(uaddr)) return -1; goto retry; case -EAGAIN: cond_resched(); goto retry; default: WARN_ON_ONCE(1); return err; } } if (nval != uval) goto retry; /* * Wake robust non-PI futexes here. The wakeup of * PI futexes happens in exit_pi_state(): */ if (!pi && (uval & FUTEX_WAITERS)) { futex_wake(uaddr, FLAGS_SIZE_32 | FLAGS_SHARED, 1, FUTEX_BITSET_MATCH_ANY); } return 0; } /* * Fetch a robust-list pointer. Bit 0 signals PI futexes: */ static inline int fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, unsigned int *pi) { unsigned long uentry; if (get_user(uentry, (unsigned long __user *)head)) return -EFAULT; *entry = (void __user *)(uentry & ~1UL); *pi = uentry & 1; return 0; } /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of list-walking problem. 
*/ static void exit_robust_list(struct task_struct *curr) { struct robust_list_head __user *head = curr->robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned int next_pi; unsigned long futex_offset; int rc; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ if (fetch_robust_entry(&entry, &head->list.next, &pi)) return; /* * Fetch the relative futex offset: */ if (get_user(futex_offset, &head->futex_offset)) return; /* * Fetch any possibly pending lock-add first, and handle it * if it exists: */ if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) return; next_entry = NULL; /* avoid warning with gcc */ while (entry != &head->list) { /* * Fetch the next entry in the list before calling * handle_futex_death: */ rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi); /* * A pending lock might already be on the list, so * don't process it twice: */ if (entry != pending) { if (handle_futex_death((void __user *)entry + futex_offset, curr, pi, HANDLE_DEATH_LIST)) return; } if (rc) return; entry = next_entry; pi = next_pi; /* * Avoid excessively long or circular lists: */ if (!--limit) break; cond_resched(); } if (pending) { handle_futex_death((void __user *)pending + futex_offset, curr, pip, HANDLE_DEATH_PENDING); } } #ifdef CONFIG_COMPAT static void __user *futex_uaddr(struct robust_list __user *entry, compat_long_t futex_offset) { compat_uptr_t base = ptr_to_compat(entry); void __user *uaddr = compat_ptr(base + futex_offset); return uaddr; } /* * Fetch a robust-list pointer. Bit 0 signals PI futexes: */ static inline int compat_fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry, compat_uptr_t __user *head, unsigned int *pi) { if (get_user(*uentry, head)) return -EFAULT; *entry = compat_ptr((*uentry) & ~1); *pi = (unsigned int)(*uentry) & 1; return 0; } /* * Walk curr->robust_list (very carefully, it's a userspace list!) * and mark any locks found there dead, and notify any waiters. * * We silently return on any sign of list-walking problem. 
*/ static void compat_exit_robust_list(struct task_struct *curr) { struct compat_robust_list_head __user *head = curr->compat_robust_list; struct robust_list __user *entry, *next_entry, *pending; unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; unsigned int next_pi; compat_uptr_t uentry, next_uentry, upending; compat_long_t futex_offset; int rc; /* * Fetch the list head (which was registered earlier, via * sys_set_robust_list()): */ if (compat_fetch_robust_entry(&uentry, &entry, &head->list.next, &pi)) return; /* * Fetch the relative futex offset: */ if (get_user(futex_offset, &head->futex_offset)) return; /* * Fetch any possibly pending lock-add first, and handle it * if it exists: */ if (compat_fetch_robust_entry(&upending, &pending, &head->list_op_pending, &pip)) return; next_entry = NULL; /* avoid warning with gcc */ while (entry != (struct robust_list __user *) &head->list) { /* * Fetch the next entry in the list before calling * handle_futex_death: */ rc = compat_fetch_robust_entry(&next_uentry, &next_entry, (compat_uptr_t __user *)&entry->next, &next_pi); /* * A pending lock might already be on the list, so * dont process it twice: */ if (entry != pending) { void __user *uaddr = futex_uaddr(entry, futex_offset); if (handle_futex_death(uaddr, curr, pi, HANDLE_DEATH_LIST)) return; } if (rc) return; uentry = next_uentry; entry = next_entry; pi = next_pi; /* * Avoid excessively long or circular lists: */ if (!--limit) break; cond_resched(); } if (pending) { void __user *uaddr = futex_uaddr(pending, futex_offset); handle_futex_death(uaddr, curr, pip, HANDLE_DEATH_PENDING); } } #endif #ifdef CONFIG_FUTEX_PI /* * This task is holding PI mutexes at exit time => bad. * Kernel cleans up PI-state, but userspace is likely hosed. * (Robust-futex cleanup is separate and might save the day for userspace.) */ static void exit_pi_state_list(struct task_struct *curr) { struct list_head *next, *head = &curr->pi_state_list; struct futex_pi_state *pi_state; struct futex_hash_bucket *hb; union futex_key key = FUTEX_KEY_INIT; /* * We are a ZOMBIE and nobody can enqueue itself on * pi_state_list anymore, but we have to be careful * versus waiters unqueueing themselves: */ raw_spin_lock_irq(&curr->pi_lock); while (!list_empty(head)) { next = head->next; pi_state = list_entry(next, struct futex_pi_state, list); key = pi_state->key; hb = futex_hash(&key); /* * We can race against put_pi_state() removing itself from the * list (a waiter going away). put_pi_state() will first * decrement the reference count and then modify the list, so * its possible to see the list entry but fail this reference * acquire. * * In that case; drop the locks to let put_pi_state() make * progress and retry the loop. 
*/ if (!refcount_inc_not_zero(&pi_state->refcount)) { raw_spin_unlock_irq(&curr->pi_lock); cpu_relax(); raw_spin_lock_irq(&curr->pi_lock); continue; } raw_spin_unlock_irq(&curr->pi_lock); spin_lock(&hb->lock); raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); raw_spin_lock(&curr->pi_lock); /* * We dropped the pi-lock, so re-check whether this * task still owns the PI-state: */ if (head->next != next) { /* retain curr->pi_lock for the loop invariant */ raw_spin_unlock(&pi_state->pi_mutex.wait_lock); spin_unlock(&hb->lock); put_pi_state(pi_state); continue; } WARN_ON(pi_state->owner != curr); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); pi_state->owner = NULL; raw_spin_unlock(&curr->pi_lock); raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); spin_unlock(&hb->lock); rt_mutex_futex_unlock(&pi_state->pi_mutex); put_pi_state(pi_state); raw_spin_lock_irq(&curr->pi_lock); } raw_spin_unlock_irq(&curr->pi_lock); } #else static inline void exit_pi_state_list(struct task_struct *curr) { } #endif static void futex_cleanup(struct task_struct *tsk) { if (unlikely(tsk->robust_list)) { exit_robust_list(tsk); tsk->robust_list = NULL; } #ifdef CONFIG_COMPAT if (unlikely(tsk->compat_robust_list)) { compat_exit_robust_list(tsk); tsk->compat_robust_list = NULL; } #endif if (unlikely(!list_empty(&tsk->pi_state_list))) exit_pi_state_list(tsk); } /** * futex_exit_recursive - Set the tasks futex state to FUTEX_STATE_DEAD * @tsk: task to set the state on * * Set the futex exit state of the task lockless. The futex waiter code * observes that state when a task is exiting and loops until the task has * actually finished the futex cleanup. The worst case for this is that the * waiter runs through the wait loop until the state becomes visible. * * This is called from the recursive fault handling path in make_task_dead(). * * This is best effort. Either the futex exit code has run already or * not. If the OWNER_DIED bit has been set on the futex then the waiter can * take it over. If not, the problem is pushed back to user space. If the * futex exit code did not run yet, then an already queued waiter might * block forever, but there is nothing which can be done about that. */ void futex_exit_recursive(struct task_struct *tsk) { /* If the state is FUTEX_STATE_EXITING then futex_exit_mutex is held */ if (tsk->futex_state == FUTEX_STATE_EXITING) mutex_unlock(&tsk->futex_exit_mutex); tsk->futex_state = FUTEX_STATE_DEAD; } static void futex_cleanup_begin(struct task_struct *tsk) { /* * Prevent various race issues against a concurrent incoming waiter * including live locks by forcing the waiter to block on * tsk->futex_exit_mutex when it observes FUTEX_STATE_EXITING in * attach_to_pi_owner(). */ mutex_lock(&tsk->futex_exit_mutex); /* * Switch the state to FUTEX_STATE_EXITING under tsk->pi_lock. * * This ensures that all subsequent checks of tsk->futex_state in * attach_to_pi_owner() must observe FUTEX_STATE_EXITING with * tsk->pi_lock held. * * It guarantees also that a pi_state which was queued right before * the state change under tsk->pi_lock by a concurrent waiter must * be observed in exit_pi_state_list(). */ raw_spin_lock_irq(&tsk->pi_lock); tsk->futex_state = FUTEX_STATE_EXITING; raw_spin_unlock_irq(&tsk->pi_lock); } static void futex_cleanup_end(struct task_struct *tsk, int state) { /* * Lockless store. The only side effect is that an observer might * take another loop until it becomes visible. */ tsk->futex_state = state; /* * Drop the exit protection. 
This unblocks waiters which observed * FUTEX_STATE_EXITING to reevaluate the state. */ mutex_unlock(&tsk->futex_exit_mutex); } void futex_exec_release(struct task_struct *tsk) { /* * The state handling is done for consistency, but in the case of * exec() there is no way to prevent further damage as the PID stays * the same. But for the unlikely and arguably buggy case that a * futex is held on exec(), this provides at least as much state * consistency protection which is possible. */ futex_cleanup_begin(tsk); futex_cleanup(tsk); /* * Reset the state to FUTEX_STATE_OK. The task is alive and about * exec a new binary. */ futex_cleanup_end(tsk, FUTEX_STATE_OK); } void futex_exit_release(struct task_struct *tsk) { futex_cleanup_begin(tsk); futex_cleanup(tsk); futex_cleanup_end(tsk, FUTEX_STATE_DEAD); } static int __init futex_init(void) { unsigned int futex_shift; unsigned long i; #ifdef CONFIG_BASE_SMALL futex_hashsize = 16; #else futex_hashsize = roundup_pow_of_two(256 * num_possible_cpus()); #endif futex_queues = alloc_large_system_hash("futex", sizeof(*futex_queues), futex_hashsize, 0, 0, &futex_shift, NULL, futex_hashsize, futex_hashsize); futex_hashsize = 1UL << futex_shift; for (i = 0; i < futex_hashsize; i++) { atomic_set(&futex_queues[i].waiters, 0); plist_head_init(&futex_queues[i].chain); spin_lock_init(&futex_queues[i].lock); } return 0; } core_initcall(futex_init);
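The file above is the kernel side of the futex machinery; a minimal sketch of the matching userspace pattern, using the raw futex(2) syscall with FUTEX_WAIT and FUTEX_WAKE, may make the wait/wake paths easier to follow. The futex() wrapper, the flag variable and the thread layout below are illustrative assumptions, not part of the kernel sources; build with -pthread.

/*
 * Illustrative userspace sketch (not part of the kernel file above):
 * one thread blocks with FUTEX_WAIT while the futex word is 0, the
 * other stores 1 and issues FUTEX_WAKE. Error handling is omitted.
 */
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t flag;           /* the futex word: 0 = wait, 1 = go */

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
        /* Raw futex(2); the timeout, uaddr2 and val3 arguments are unused here. */
        return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

static void *waiter(void *arg)
{
        (void)arg;
        /* Sleep in the kernel only while the futex word still reads 0. */
        while (atomic_load(&flag) == 0)
                futex((uint32_t *)&flag, FUTEX_WAIT, 0);
        printf("waiter: woken, flag=%u\n", (unsigned)atomic_load(&flag));
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waiter, NULL);
        sleep(1);                                /* give the waiter time to block */

        atomic_store(&flag, 1);                  /* publish the new value ... */
        futex((uint32_t *)&flag, FUTEX_WAKE, 1); /* ... then wake one waiter */

        pthread_join(t, NULL);
        return 0;
}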
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 Politecnico di Torino, Italy * TORSEC group -- https://security.polito.it * * Author: Roberto Sassu <roberto.sassu@polito.it> * * File: ima_template.c * Helpers to manage template descriptors.
*/ #include <linux/rculist.h> #include "ima.h" #include "ima_template_lib.h" enum header_fields { HDR_PCR, HDR_DIGEST, HDR_TEMPLATE_NAME, HDR_TEMPLATE_DATA, HDR__LAST }; static struct ima_template_desc builtin_templates[] = { {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT}, {.name = "ima-ng", .fmt = "d-ng|n-ng"}, {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"}, {.name = "ima-ngv2", .fmt = "d-ngv2|n-ng"}, {.name = "ima-sigv2", .fmt = "d-ngv2|n-ng|sig"}, {.name = "ima-buf", .fmt = "d-ng|n-ng|buf"}, {.name = "ima-modsig", .fmt = "d-ng|n-ng|sig|d-modsig|modsig"}, {.name = "evm-sig", .fmt = "d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode"}, {.name = "", .fmt = ""}, /* placeholder for a custom format */ }; static LIST_HEAD(defined_templates); static DEFINE_SPINLOCK(template_list); static int template_setup_done; static const struct ima_template_field supported_fields[] = { {.field_id = "d", .field_init = ima_eventdigest_init, .field_show = ima_show_template_digest}, {.field_id = "n", .field_init = ima_eventname_init, .field_show = ima_show_template_string}, {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init, .field_show = ima_show_template_digest_ng}, {.field_id = "d-ngv2", .field_init = ima_eventdigest_ngv2_init, .field_show = ima_show_template_digest_ngv2}, {.field_id = "n-ng", .field_init = ima_eventname_ng_init, .field_show = ima_show_template_string}, {.field_id = "sig", .field_init = ima_eventsig_init, .field_show = ima_show_template_sig}, {.field_id = "buf", .field_init = ima_eventbuf_init, .field_show = ima_show_template_buf}, {.field_id = "d-modsig", .field_init = ima_eventdigest_modsig_init, .field_show = ima_show_template_digest_ng}, {.field_id = "modsig", .field_init = ima_eventmodsig_init, .field_show = ima_show_template_sig}, {.field_id = "evmsig", .field_init = ima_eventevmsig_init, .field_show = ima_show_template_sig}, {.field_id = "iuid", .field_init = ima_eventinodeuid_init, .field_show = ima_show_template_uint}, {.field_id = "igid", .field_init = ima_eventinodegid_init, .field_show = ima_show_template_uint}, {.field_id = "imode", .field_init = ima_eventinodemode_init, .field_show = ima_show_template_uint}, {.field_id = "xattrnames", .field_init = ima_eventinodexattrnames_init, .field_show = ima_show_template_string}, {.field_id = "xattrlengths", .field_init = ima_eventinodexattrlengths_init, .field_show = ima_show_template_sig}, {.field_id = "xattrvalues", .field_init = ima_eventinodexattrvalues_init, .field_show = ima_show_template_sig}, }; /* * Used when restoring measurements carried over from a kexec. 'd' and 'n' don't * need to be accounted for since they shouldn't be defined in the same template * description as 'd-ng' and 'n-ng' respectively. */ #define MAX_TEMPLATE_NAME_LEN \ sizeof("d-ng|n-ng|evmsig|xattrnames|xattrlengths|xattrvalues|iuid|igid|imode") static struct ima_template_desc *ima_template; static struct ima_template_desc *ima_buf_template; /** * ima_template_has_modsig - Check whether template has modsig-related fields. * @ima_template: IMA template to check. * * Tells whether the given template has fields referencing a file's appended * signature. 
*/ bool ima_template_has_modsig(const struct ima_template_desc *ima_template) { int i; for (i = 0; i < ima_template->num_fields; i++) if (!strcmp(ima_template->fields[i]->field_id, "modsig") || !strcmp(ima_template->fields[i]->field_id, "d-modsig")) return true; return false; } static int __init ima_template_setup(char *str) { struct ima_template_desc *template_desc; int template_len = strlen(str); if (template_setup_done) return 1; if (!ima_template) ima_init_template_list(); /* * Verify that a template with the supplied name exists. * If not, use CONFIG_IMA_DEFAULT_TEMPLATE. */ template_desc = lookup_template_desc(str); if (!template_desc) { pr_err("template %s not found, using %s\n", str, CONFIG_IMA_DEFAULT_TEMPLATE); return 1; } /* * Verify whether the current hash algorithm is supported * by the 'ima' template. */ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 && ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) { pr_err("template does not support hash alg\n"); return 1; } ima_template = template_desc; template_setup_done = 1; return 1; } __setup("ima_template=", ima_template_setup); static int __init ima_template_fmt_setup(char *str) { int num_templates = ARRAY_SIZE(builtin_templates); if (template_setup_done) return 1; if (template_desc_init_fields(str, NULL, NULL) < 0) { pr_err("format string '%s' not valid, using template %s\n", str, CONFIG_IMA_DEFAULT_TEMPLATE); return 1; } builtin_templates[num_templates - 1].fmt = str; ima_template = builtin_templates + num_templates - 1; template_setup_done = 1; return 1; } __setup("ima_template_fmt=", ima_template_fmt_setup); struct ima_template_desc *lookup_template_desc(const char *name) { struct ima_template_desc *template_desc; int found = 0; rcu_read_lock(); list_for_each_entry_rcu(template_desc, &defined_templates, list) { if ((strcmp(template_desc->name, name) == 0) || (strcmp(template_desc->fmt, name) == 0)) { found = 1; break; } } rcu_read_unlock(); return found ? template_desc : NULL; } static const struct ima_template_field * lookup_template_field(const char *field_id) { int i; for (i = 0; i < ARRAY_SIZE(supported_fields); i++) if (strncmp(supported_fields[i].field_id, field_id, IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0) return &supported_fields[i]; return NULL; } static int template_fmt_size(const char *template_fmt) { char c; int template_fmt_len = strlen(template_fmt); int i = 0, j = 0; while (i < template_fmt_len) { c = template_fmt[i]; if (c == '|') j++; i++; } return j + 1; } int template_desc_init_fields(const char *template_fmt, const struct ima_template_field ***fields, int *num_fields) { const char *template_fmt_ptr; const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX]; int template_num_fields; int i, len; if (num_fields && *num_fields > 0) /* already initialized? 
*/ return 0; template_num_fields = template_fmt_size(template_fmt); if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) { pr_err("format string '%s' contains too many fields\n", template_fmt); return -EINVAL; } for (i = 0, template_fmt_ptr = template_fmt; i < template_num_fields; i++, template_fmt_ptr += len + 1) { char tmp_field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN + 1]; len = strchrnul(template_fmt_ptr, '|') - template_fmt_ptr; if (len == 0 || len > IMA_TEMPLATE_FIELD_ID_MAX_LEN) { pr_err("Invalid field with length %d\n", len); return -EINVAL; } memcpy(tmp_field_id, template_fmt_ptr, len); tmp_field_id[len] = '\0'; found_fields[i] = lookup_template_field(tmp_field_id); if (!found_fields[i]) { pr_err("field '%s' not found\n", tmp_field_id); return -ENOENT; } } if (fields && num_fields) { *fields = kmalloc_array(i, sizeof(**fields), GFP_KERNEL); if (*fields == NULL) return -ENOMEM; memcpy(*fields, found_fields, i * sizeof(**fields)); *num_fields = i; } return 0; } void ima_init_template_list(void) { int i; if (!list_empty(&defined_templates)) return; spin_lock(&template_list); for (i = 0; i < ARRAY_SIZE(builtin_templates); i++) { list_add_tail_rcu(&builtin_templates[i].list, &defined_templates); } spin_unlock(&template_list); } struct ima_template_desc *ima_template_desc_current(void) { if (!ima_template) { ima_init_template_list(); ima_template = lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE); } return ima_template; } struct ima_template_desc *ima_template_desc_buf(void) { if (!ima_buf_template) { ima_init_template_list(); ima_buf_template = lookup_template_desc("ima-buf"); } return ima_buf_template; } int __init ima_init_template(void) { struct ima_template_desc *template = ima_template_desc_current(); int result; result = template_desc_init_fields(template->fmt, &(template->fields), &(template->num_fields)); if (result < 0) { pr_err("template %s init failed, result: %d\n", (strlen(template->name) ? template->name : template->fmt), result); return result; } template = ima_template_desc_buf(); if (!template) { pr_err("Failed to get ima-buf template\n"); return -EINVAL; } result = template_desc_init_fields(template->fmt, &(template->fields), &(template->num_fields)); if (result < 0) pr_err("template %s init failed, result: %d\n", (strlen(template->name) ? 
template->name : template->fmt), result); return result; } static struct ima_template_desc *restore_template_fmt(char *template_name) { struct ima_template_desc *template_desc = NULL; int ret; ret = template_desc_init_fields(template_name, NULL, NULL); if (ret < 0) { pr_err("attempting to initialize the template \"%s\" failed\n", template_name); goto out; } template_desc = kzalloc(sizeof(*template_desc), GFP_KERNEL); if (!template_desc) goto out; template_desc->name = ""; template_desc->fmt = kstrdup(template_name, GFP_KERNEL); if (!template_desc->fmt) { kfree(template_desc); template_desc = NULL; goto out; } spin_lock(&template_list); list_add_tail_rcu(&template_desc->list, &defined_templates); spin_unlock(&template_list); out: return template_desc; } static int ima_restore_template_data(struct ima_template_desc *template_desc, void *template_data, int template_data_size, struct ima_template_entry **entry) { struct tpm_digest *digests; int ret = 0; int i; *entry = kzalloc(struct_size(*entry, template_data, template_desc->num_fields), GFP_NOFS); if (!*entry) return -ENOMEM; digests = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots, sizeof(*digests), GFP_NOFS); if (!digests) { kfree(*entry); return -ENOMEM; } (*entry)->digests = digests; ret = ima_parse_buf(template_data, template_data + template_data_size, NULL, template_desc->num_fields, (*entry)->template_data, NULL, NULL, ENFORCE_FIELDS | ENFORCE_BUFEND, "template data"); if (ret < 0) { kfree((*entry)->digests); kfree(*entry); return ret; } (*entry)->template_desc = template_desc; for (i = 0; i < template_desc->num_fields; i++) { struct ima_field_data *field_data = &(*entry)->template_data[i]; u8 *data = field_data->data; (*entry)->template_data[i].data = kzalloc(field_data->len + 1, GFP_KERNEL); if (!(*entry)->template_data[i].data) { ret = -ENOMEM; break; } memcpy((*entry)->template_data[i].data, data, field_data->len); (*entry)->template_data_len += sizeof(field_data->len); (*entry)->template_data_len += field_data->len; } if (ret < 0) { ima_free_template_entry(*entry); *entry = NULL; } return ret; } /* Restore the serialized binary measurement list without extending PCRs. 
*/ int ima_restore_measurement_list(loff_t size, void *buf) { char template_name[MAX_TEMPLATE_NAME_LEN]; unsigned char zero[TPM_DIGEST_SIZE] = { 0 }; struct ima_kexec_hdr *khdr = buf; struct ima_field_data hdr[HDR__LAST] = { [HDR_PCR] = {.len = sizeof(u32)}, [HDR_DIGEST] = {.len = TPM_DIGEST_SIZE}, }; void *bufp = buf + sizeof(*khdr); void *bufendp; struct ima_template_entry *entry; struct ima_template_desc *template_desc; DECLARE_BITMAP(hdr_mask, HDR__LAST); unsigned long count = 0; int ret = 0; if (!buf || size < sizeof(*khdr)) return 0; if (ima_canonical_fmt) { khdr->version = le16_to_cpu((__force __le16)khdr->version); khdr->count = le64_to_cpu((__force __le64)khdr->count); khdr->buffer_size = le64_to_cpu((__force __le64)khdr->buffer_size); } if (khdr->version != 1) { pr_err("attempting to restore a incompatible measurement list"); return -EINVAL; } if (khdr->count > ULONG_MAX - 1) { pr_err("attempting to restore too many measurements"); return -EINVAL; } bitmap_zero(hdr_mask, HDR__LAST); bitmap_set(hdr_mask, HDR_PCR, 1); bitmap_set(hdr_mask, HDR_DIGEST, 1); /* * ima kexec buffer prefix: version, buffer size, count * v1 format: pcr, digest, template-name-len, template-name, * template-data-size, template-data */ bufendp = buf + khdr->buffer_size; while ((bufp < bufendp) && (count++ < khdr->count)) { int enforce_mask = ENFORCE_FIELDS; enforce_mask |= (count == khdr->count) ? ENFORCE_BUFEND : 0; ret = ima_parse_buf(bufp, bufendp, &bufp, HDR__LAST, hdr, NULL, hdr_mask, enforce_mask, "entry header"); if (ret < 0) break; if (hdr[HDR_TEMPLATE_NAME].len >= MAX_TEMPLATE_NAME_LEN) { pr_err("attempting to restore a template name that is too long\n"); ret = -EINVAL; break; } /* template name is not null terminated */ memcpy(template_name, hdr[HDR_TEMPLATE_NAME].data, hdr[HDR_TEMPLATE_NAME].len); template_name[hdr[HDR_TEMPLATE_NAME].len] = 0; if (strcmp(template_name, "ima") == 0) { pr_err("attempting to restore an unsupported template \"%s\" failed\n", template_name); ret = -EINVAL; break; } template_desc = lookup_template_desc(template_name); if (!template_desc) { template_desc = restore_template_fmt(template_name); if (!template_desc) break; } /* * Only the running system's template format is initialized * on boot. As needed, initialize the other template formats. */ ret = template_desc_init_fields(template_desc->fmt, &(template_desc->fields), &(template_desc->num_fields)); if (ret < 0) { pr_err("attempting to restore the template fmt \"%s\" failed\n", template_desc->fmt); ret = -EINVAL; break; } ret = ima_restore_template_data(template_desc, hdr[HDR_TEMPLATE_DATA].data, hdr[HDR_TEMPLATE_DATA].len, &entry); if (ret < 0) break; if (memcmp(hdr[HDR_DIGEST].data, zero, sizeof(zero))) { ret = ima_calc_field_array_hash( &entry->template_data[0], entry); if (ret < 0) { pr_err("cannot calculate template digest\n"); ret = -EINVAL; break; } } entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) : le32_to_cpu(*(__le32 *)(hdr[HDR_PCR].data)); ret = ima_restore_measurement_entry(entry); if (ret < 0) break; } return ret; }
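As an aside, the '|'-separated template format strings handled by template_fmt_size() and template_desc_init_fields() above (for example "d-ng|n-ng|sig") can be split with ordinary string routines. The standalone sketch below mirrors that parsing for illustration only; it is not the kernel implementation, and FIELD_ID_MAX is a stand-in for IMA_TEMPLATE_FIELD_ID_MAX_LEN.

/*
 * Standalone sketch (not the kernel implementation): split an IMA
 * template format string such as "d-ng|n-ng|sig" into field ids,
 * mirroring the '|' parsing done above.
 */
#include <stdio.h>
#include <string.h>

#define FIELD_ID_MAX 16         /* stand-in for IMA_TEMPLATE_FIELD_ID_MAX_LEN */

int main(void)
{
        const char *fmt = "d-ng|n-ng|sig";
        const char *p = fmt;
        int nfields = 0;

        while (*p) {
                const char *sep = strchr(p, '|');
                size_t len = sep ? (size_t)(sep - p) : strlen(p);
                char id[FIELD_ID_MAX + 1];

                if (len == 0 || len > FIELD_ID_MAX) {
                        fprintf(stderr, "invalid field of length %zu\n", len);
                        return 1;
                }
                memcpy(id, p, len);
                id[len] = '\0';
                printf("field %d: %s\n", nfields++, id);

                p += len + (sep ? 1 : 0);       /* skip the separator, if any */
        }
        printf("%d fields total\n", nfields);
        return 0;
}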
// SPDX-License-Identifier: GPL-2.0-or-later #include <linux/dma-resv.h> #include <linux/dma-fence-chain.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_atomic_uapi.h> #include <drm/drm_framebuffer.h> #include <drm/drm_gem.h> #include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_simple_kms_helper.h> #include "drm_internal.h" /** * DOC: overview * * The GEM atomic helpers library implements generic atomic-commit * functions for drivers that use GEM objects. Currently, it provides * synchronization helpers, and plane state and framebuffer BO mappings * for planes with shadow buffers. * * Before scanout, a plane's framebuffer needs to be synchronized with * possible writers that draw into the framebuffer. All drivers should * call drm_gem_plane_helper_prepare_fb() from their implementation of * struct &drm_plane_helper.prepare_fb . It sets the plane's fence from * the framebuffer so that the DRM core can synchronize access automatically. * drm_gem_plane_helper_prepare_fb() can also be used directly as * implementation of prepare_fb. * * .. code-block:: c * * #include <drm/drm_gem_atomic_helper.h> * * struct drm_plane_helper_funcs driver_plane_helper_funcs = { * ..., * . prepare_fb = drm_gem_plane_helper_prepare_fb, * }; * * A driver using a shadow buffer copies the content of the shadow buffers * into the HW's framebuffer memory during an atomic update. This requires * a mapping of the shadow buffer into kernel address space. The mappings * cannot be established by commit-tail functions, such as atomic_update, * as this would violate locking rules around dma_buf_vmap().
* * The helpers for shadow-buffered planes establish and release mappings, * and provide struct drm_shadow_plane_state, which stores the plane's mapping * for commit-tail functions. * * Shadow-buffered planes can easily be enabled by using the provided macros * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS. * These macros set up the plane and plane-helper callbacks to point to the * shadow-buffer helpers. * * .. code-block:: c * * #include <drm/drm_gem_atomic_helper.h> * * struct drm_plane_funcs driver_plane_funcs = { * ..., * DRM_GEM_SHADOW_PLANE_FUNCS, * }; * * struct drm_plane_helper_funcs driver_plane_helper_funcs = { * ..., * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, * }; * * In the driver's atomic-update function, shadow-buffer mappings are available * from the plane state. Use to_drm_shadow_plane_state() to upcast from * struct drm_plane_state. * * .. code-block:: c * * void driver_plane_atomic_update(struct drm_plane *plane, * struct drm_plane_state *old_plane_state) * { * struct drm_plane_state *plane_state = plane->state; * struct drm_shadow_plane_state *shadow_plane_state = * to_drm_shadow_plane_state(plane_state); * * // access shadow buffer via shadow_plane_state->map * } * * A mapping address for each of the framebuffer's buffer object is stored in * struct &drm_shadow_plane_state.map. The mappings are valid while the state * is being used. * * Drivers that use struct drm_simple_display_pipe can use * %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the rsp * callbacks. Access to shadow-buffer mappings is similar to regular * atomic_update. * * .. code-block:: c * * struct drm_simple_display_pipe_funcs driver_pipe_funcs = { * ..., * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS, * }; * * void driver_pipe_enable(struct drm_simple_display_pipe *pipe, * struct drm_crtc_state *crtc_state, * struct drm_plane_state *plane_state) * { * struct drm_shadow_plane_state *shadow_plane_state = * to_drm_shadow_plane_state(plane_state); * * // access shadow buffer via shadow_plane_state->map * } */ /* * Plane Helpers */ /** * drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer * @plane: Plane * @state: Plane state the fence will be attached to * * This function extracts the exclusive fence from &drm_gem_object.resv and * attaches it to plane state for the atomic helper to wait on. This is * necessary to correctly implement implicit synchronization for any buffers * shared as a struct &dma_buf. This function can be used as the * &drm_plane_helper_funcs.prepare_fb callback. * * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple * GEM based framebuffer drivers which have their buffers always pinned in * memory. * * This function is the default implementation for GEM drivers of * &drm_plane_helper_funcs.prepare_fb if no callback is provided. */ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state) { struct dma_fence *fence = dma_fence_get(state->fence); enum dma_resv_usage usage; size_t i; int ret; if (!state->fb) return 0; /* * Only add the kernel fences here if there is already a fence set via * explicit fencing interfaces on the atomic ioctl. * * This way explicit fencing can be used to overrule implicit fencing, * which is important to make explicit fencing use-cases work: One * example is using one buffer for 2 screens with different refresh * rates. 
Implicit fencing will clamp rendering to the refresh rate of * the slower screen, whereas explicit fence allows 2 independent * render and display loops on a single buffer. If a driver allows * obeys both implicit and explicit fences for plane updates, then it * will break all the benefits of explicit fencing. */ usage = fence ? DMA_RESV_USAGE_KERNEL : DMA_RESV_USAGE_WRITE; for (i = 0; i < state->fb->format->num_planes; ++i) { struct drm_gem_object *obj = drm_gem_fb_get_obj(state->fb, i); struct dma_fence *new; if (!obj) { ret = -EINVAL; goto error; } ret = dma_resv_get_singleton(obj->resv, usage, &new); if (ret) goto error; if (new && fence) { struct dma_fence_chain *chain = dma_fence_chain_alloc(); if (!chain) { ret = -ENOMEM; goto error; } dma_fence_chain_init(chain, fence, new, 1); fence = &chain->base; } else if (new) { fence = new; } } dma_fence_put(state->fence); state->fence = fence; return 0; error: dma_fence_put(fence); return ret; } EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb); /* * Shadow-buffered Planes */ /** * __drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state * @plane: the plane * @new_shadow_plane_state: the new shadow-buffered plane state * * This function duplicates shadow-buffered plane state. This is helpful for drivers * that subclass struct drm_shadow_plane_state. * * The function does not duplicate existing mappings of the shadow buffers. * Mappings are maintained during the atomic commit by the plane's prepare_fb * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb() * for corresponding helpers. */ void __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane, struct drm_shadow_plane_state *new_shadow_plane_state) { struct drm_plane_state *plane_state = plane->state; struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base); drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state, &shadow_plane_state->fmtcnv_state); } EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state); /** * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state * @plane: the plane * * This function implements struct &drm_plane_funcs.atomic_duplicate_state for * shadow-buffered planes. It assumes the existing state to be of type * struct drm_shadow_plane_state and it allocates the new state to be of this * type. * * The function does not duplicate existing mappings of the shadow buffers. * Mappings are maintained during the atomic commit by the plane's prepare_fb * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb() * for corresponding helpers. * * Returns: * A pointer to a new plane state on success, or NULL otherwise. */ struct drm_plane_state * drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane) { struct drm_plane_state *plane_state = plane->state; struct drm_shadow_plane_state *new_shadow_plane_state; if (!plane_state) return NULL; new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL); if (!new_shadow_plane_state) return NULL; __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state); return &new_shadow_plane_state->base; } EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state); /** * __drm_gem_destroy_shadow_plane_state - cleans up shadow-buffered plane state * @shadow_plane_state: the shadow-buffered plane state * * This function cleans up shadow-buffered plane state. 
Helpful for drivers that * subclass struct drm_shadow_plane_state. */ void __drm_gem_destroy_shadow_plane_state(struct drm_shadow_plane_state *shadow_plane_state) { drm_format_conv_state_release(&shadow_plane_state->fmtcnv_state); __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base); } EXPORT_SYMBOL(__drm_gem_destroy_shadow_plane_state); /** * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state * @plane: the plane * @plane_state: the plane state of type struct drm_shadow_plane_state * * This function implements struct &drm_plane_funcs.atomic_destroy_state * for shadow-buffered planes. It expects that mappings of shadow buffers * have been released already. */ void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane, struct drm_plane_state *plane_state) { struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); __drm_gem_destroy_shadow_plane_state(shadow_plane_state); kfree(shadow_plane_state); } EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state); /** * __drm_gem_reset_shadow_plane - resets a shadow-buffered plane * @plane: the plane * @shadow_plane_state: the shadow-buffered plane state * * This function resets state for shadow-buffered planes. Helpful * for drivers that subclass struct drm_shadow_plane_state. */ void __drm_gem_reset_shadow_plane(struct drm_plane *plane, struct drm_shadow_plane_state *shadow_plane_state) { __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base); drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state); } EXPORT_SYMBOL(__drm_gem_reset_shadow_plane); /** * drm_gem_reset_shadow_plane - resets a shadow-buffered plane * @plane: the plane * * This function implements struct &drm_plane_funcs.reset_plane for * shadow-buffered planes. It assumes the current plane state to be * of type struct drm_shadow_plane and it allocates the new state of * this type. */ void drm_gem_reset_shadow_plane(struct drm_plane *plane) { struct drm_shadow_plane_state *shadow_plane_state; if (plane->state) { drm_gem_destroy_shadow_plane_state(plane, plane->state); plane->state = NULL; /* must be set to NULL here */ } shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL); if (!shadow_plane_state) return; __drm_gem_reset_shadow_plane(plane, shadow_plane_state); } EXPORT_SYMBOL(drm_gem_reset_shadow_plane); /** * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access * @plane: the plane * @plane_state: the plane state of type struct drm_shadow_plane_state * * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It * maps all buffer objects of the plane's framebuffer into kernel address * space and stores them in struct &drm_shadow_plane_state.map. The first data * bytes are available in struct &drm_shadow_plane_state.data. * * See drm_gem_end_shadow_fb_access() for cleanup. * * Returns: * 0 on success, or a negative errno code otherwise. 
*/ int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state) { struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; if (!fb) return 0; return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data); } EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access); /** * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access * @plane: the plane * @plane_state: the plane state of type struct drm_shadow_plane_state * * This function implements struct &drm_plane_helper_funcs.end_fb_access. It * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order. * * See drm_gem_begin_shadow_fb_access() for more information. */ void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state) { struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; if (!fb) return; drm_gem_fb_vunmap(fb, shadow_plane_state->map); } EXPORT_SYMBOL(drm_gem_end_shadow_fb_access); /** * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access * @pipe: the simple display pipe * @plane_state: the plane state of type struct drm_shadow_plane_state * * This function implements struct drm_simple_display_funcs.begin_fb_access. * * See drm_gem_begin_shadow_fb_access() for details and * drm_gem_simple_kms_cleanup_shadow_fb() for cleanup. * * Returns: * 0 on success, or a negative errno code otherwise. */ int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state) { return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state); } EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access); /** * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access * @pipe: the simple display pipe * @plane_state: the plane state of type struct drm_shadow_plane_state * * This function implements struct drm_simple_display_funcs.end_fb_access. * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in * reverse order. * * See drm_gem_simple_kms_begin_shadow_fb_access(). */ void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state) { drm_gem_end_shadow_fb_access(&pipe->plane, plane_state); } EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access); /** * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane * @pipe: the simple display pipe * * This function implements struct drm_simple_display_funcs.reset_plane * for shadow-buffered planes. */ void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe) { drm_gem_reset_shadow_plane(&pipe->plane); } EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane); /** * drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state * @pipe: the simple display pipe * * This function implements struct drm_simple_display_funcs.duplicate_plane_state * for shadow-buffered planes. It does not duplicate existing mappings of the shadow * buffers. Mappings are maintained during the atomic commit by the plane's prepare_fb * and cleanup_fb helpers. * * Returns: * A pointer to a new plane state on success, or NULL otherwise. 
*/ struct drm_plane_state * drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe) { return drm_gem_duplicate_shadow_plane_state(&pipe->plane); } EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state); /** * drm_gem_simple_kms_destroy_shadow_plane_state - resets shadow-buffered plane state * @pipe: the simple display pipe * @plane_state: the plane state of type struct drm_shadow_plane_state * * This function implements struct drm_simple_display_funcs.destroy_plane_state * for shadow-buffered planes. It expects that mappings of shadow buffers * have been released already. */ void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe, struct drm_plane_state *plane_state) { drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state); } EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);
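To complement the helpers above, here is a minimal sketch of a driver that subclasses struct drm_shadow_plane_state and wires the __drm_gem_*_shadow_plane_state() helpers into its own plane funcs, as the kernel-doc comments suggest. All driver_* names and the scratch field are hypothetical; a real driver would fill in its remaining callbacks, copy its private fields on duplication, and add error handling.

#include <linux/container_of.h>
#include <linux/slab.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane.h>

/* Hypothetical per-plane state embedding the shadow-plane state. */
struct driver_plane_state {
	struct drm_shadow_plane_state shadow;
	u32 scratch;				/* example driver-private field */
};

static struct drm_plane_state *
driver_plane_duplicate_state(struct drm_plane *plane)
{
	struct driver_plane_state *new_state;

	if (!plane->state)
		return NULL;

	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
	if (!new_state)
		return NULL;

	/* Copies the base and fmtcnv state, but not the shadow-buffer mappings. */
	__drm_gem_duplicate_shadow_plane_state(plane, &new_state->shadow);

	/* A real driver would also copy its private fields here. */

	return &new_state->shadow.base;
}

static void driver_plane_destroy_state(struct drm_plane *plane,
				       struct drm_plane_state *plane_state)
{
	struct drm_shadow_plane_state *shadow = to_drm_shadow_plane_state(plane_state);
	struct driver_plane_state *state =
		container_of(shadow, struct driver_plane_state, shadow);

	__drm_gem_destroy_shadow_plane_state(shadow);
	kfree(state);
}

static void driver_plane_reset(struct drm_plane *plane)
{
	struct driver_plane_state *state;

	if (plane->state) {
		driver_plane_destroy_state(plane, plane->state);
		plane->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	__drm_gem_reset_shadow_plane(plane, &state->shadow);
}

static const struct drm_plane_funcs driver_plane_funcs = {
	/* ... update_plane, disable_plane, destroy, ... */
	.reset			= driver_plane_reset,
	.atomic_duplicate_state	= driver_plane_duplicate_state,
	.atomic_destroy_state	= driver_plane_destroy_state,
};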
// SPDX-License-Identifier: GPL-2.0-only /* * inode.c - part of tracefs, a pseudo file system for activating tracing * * Based on debugfs by: Greg Kroah-Hartman <greg@kroah.com> * * Copyright (C) 2014 Red Hat Inc, author: Steven Rostedt <srostedt@redhat.com> * * tracefs is the file system that is used by the tracing
infrastructure. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> #include <linux/kobject.h> #include <linux/namei.h> #include <linux/tracefs.h> #include <linux/fsnotify.h> #include <linux/security.h> #include <linux/seq_file.h> #include <linux/magic.h> #include <linux/slab.h> #include "internal.h" #define TRACEFS_DEFAULT_MODE 0700 static struct kmem_cache *tracefs_inode_cachep __ro_after_init; static struct vfsmount *tracefs_mount; static int tracefs_mount_count; static bool tracefs_registered; /* * Keep track of all tracefs_inodes in order to update their * flags if necessary on a remount. */ static DEFINE_SPINLOCK(tracefs_inode_lock); static LIST_HEAD(tracefs_inodes); static struct inode *tracefs_alloc_inode(struct super_block *sb) { struct tracefs_inode *ti; unsigned long flags; ti = alloc_inode_sb(sb, tracefs_inode_cachep, GFP_KERNEL); if (!ti) return NULL; spin_lock_irqsave(&tracefs_inode_lock, flags); list_add_rcu(&ti->list, &tracefs_inodes); spin_unlock_irqrestore(&tracefs_inode_lock, flags); return &ti->vfs_inode; } static void tracefs_free_inode(struct inode *inode) { struct tracefs_inode *ti = get_tracefs(inode); kmem_cache_free(tracefs_inode_cachep, ti); } static void tracefs_destroy_inode(struct inode *inode) { struct tracefs_inode *ti = get_tracefs(inode); unsigned long flags; spin_lock_irqsave(&tracefs_inode_lock, flags); list_del_rcu(&ti->list); spin_unlock_irqrestore(&tracefs_inode_lock, flags); } static ssize_t default_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return 0; } static ssize_t default_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { return count; } static const struct file_operations tracefs_file_operations = { .read = default_read_file, .write = default_write_file, .open = simple_open, .llseek = noop_llseek, }; static struct tracefs_dir_ops { int (*mkdir)(const char *name); int (*rmdir)(const char *name); } tracefs_ops __ro_after_init; static char *get_dname(struct dentry *dentry) { const char *dname; char *name; int len = dentry->d_name.len; dname = dentry->d_name.name; name = kmalloc(len + 1, GFP_KERNEL); if (!name) return NULL; memcpy(name, dname, len); name[len] = 0; return name; } static int tracefs_syscall_mkdir(struct mnt_idmap *idmap, struct inode *inode, struct dentry *dentry, umode_t mode) { struct tracefs_inode *ti; char *name; int ret; name = get_dname(dentry); if (!name) return -ENOMEM; /* * This is a new directory that does not take the default of * the rootfs. It becomes the default permissions for all the * files and directories underneath it. */ ti = get_tracefs(inode); ti->flags |= TRACEFS_INSTANCE_INODE; ti->private = inode; /* * The mkdir call can call the generic functions that create * the files within the tracefs system. It is up to the individual * mkdir routine to handle races. */ inode_unlock(inode); ret = tracefs_ops.mkdir(name); inode_lock(inode); kfree(name); return ret; } static int tracefs_syscall_rmdir(struct inode *inode, struct dentry *dentry) { char *name; int ret; name = get_dname(dentry); if (!name) return -ENOMEM; /* * The rmdir call can call the generic functions that create * the files within the tracefs system. It is up to the individual * rmdir routine to handle races. * This time we need to unlock not only the parent (inode) but * also the directory that is being deleted. 
*/ inode_unlock(inode); inode_unlock(d_inode(dentry)); ret = tracefs_ops.rmdir(name); inode_lock_nested(inode, I_MUTEX_PARENT); inode_lock(d_inode(dentry)); kfree(name); return ret; } static void set_tracefs_inode_owner(struct inode *inode) { struct tracefs_inode *ti = get_tracefs(inode); struct inode *root_inode = ti->private; kuid_t uid; kgid_t gid; uid = root_inode->i_uid; gid = root_inode->i_gid; /* * If the root is not the mount point, then check the root's * permissions. If it was never set, then default to the * mount point. */ if (root_inode != d_inode(root_inode->i_sb->s_root)) { struct tracefs_inode *rti; rti = get_tracefs(root_inode); root_inode = d_inode(root_inode->i_sb->s_root); if (!(rti->flags & TRACEFS_UID_PERM_SET)) uid = root_inode->i_uid; if (!(rti->flags & TRACEFS_GID_PERM_SET)) gid = root_inode->i_gid; } /* * If this inode has never been referenced, then update * the permissions to the superblock. */ if (!(ti->flags & TRACEFS_UID_PERM_SET)) inode->i_uid = uid; if (!(ti->flags & TRACEFS_GID_PERM_SET)) inode->i_gid = gid; } static int tracefs_permission(struct mnt_idmap *idmap, struct inode *inode, int mask) { set_tracefs_inode_owner(inode); return generic_permission(idmap, inode, mask); } static int tracefs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int flags) { struct inode *inode = d_backing_inode(path->dentry); set_tracefs_inode_owner(inode); generic_fillattr(idmap, request_mask, inode, stat); return 0; } static int tracefs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { unsigned int ia_valid = attr->ia_valid; struct inode *inode = d_inode(dentry); struct tracefs_inode *ti = get_tracefs(inode); if (ia_valid & ATTR_UID) ti->flags |= TRACEFS_UID_PERM_SET; if (ia_valid & ATTR_GID) ti->flags |= TRACEFS_GID_PERM_SET; return simple_setattr(idmap, dentry, attr); } static const struct inode_operations tracefs_instance_dir_inode_operations = { .lookup = simple_lookup, .mkdir = tracefs_syscall_mkdir, .rmdir = tracefs_syscall_rmdir, .permission = tracefs_permission, .getattr = tracefs_getattr, .setattr = tracefs_setattr, }; static const struct inode_operations tracefs_dir_inode_operations = { .lookup = simple_lookup, .permission = tracefs_permission, .getattr = tracefs_getattr, .setattr = tracefs_setattr, }; static const struct inode_operations tracefs_file_inode_operations = { .permission = tracefs_permission, .getattr = tracefs_getattr, .setattr = tracefs_setattr, }; struct inode *tracefs_get_inode(struct super_block *sb) { struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); simple_inode_init_ts(inode); } return inode; } struct tracefs_fs_info { kuid_t uid; kgid_t gid; umode_t mode; /* Opt_* bitfield. 
*/ unsigned int opts; }; enum { Opt_uid, Opt_gid, Opt_mode, }; static const struct fs_parameter_spec tracefs_param_specs[] = { fsparam_gid ("gid", Opt_gid), fsparam_u32oct ("mode", Opt_mode), fsparam_uid ("uid", Opt_uid), {} }; static int tracefs_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct tracefs_fs_info *opts = fc->s_fs_info; struct fs_parse_result result; int opt; opt = fs_parse(fc, tracefs_param_specs, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_uid: opts->uid = result.uid; break; case Opt_gid: opts->gid = result.gid; break; case Opt_mode: opts->mode = result.uint_32 & S_IALLUGO; break; /* * We might like to report bad mount options here; * but traditionally tracefs has ignored all mount options */ } opts->opts |= BIT(opt); return 0; } static int tracefs_apply_options(struct super_block *sb, bool remount) { struct tracefs_fs_info *fsi = sb->s_fs_info; struct inode *inode = d_inode(sb->s_root); struct tracefs_inode *ti; bool update_uid, update_gid; umode_t tmp_mode; /* * On remount, only reset mode/uid/gid if they were provided as mount * options. */ if (!remount || fsi->opts & BIT(Opt_mode)) { tmp_mode = READ_ONCE(inode->i_mode) & ~S_IALLUGO; tmp_mode |= fsi->mode; WRITE_ONCE(inode->i_mode, tmp_mode); } if (!remount || fsi->opts & BIT(Opt_uid)) inode->i_uid = fsi->uid; if (!remount || fsi->opts & BIT(Opt_gid)) inode->i_gid = fsi->gid; if (remount && (fsi->opts & BIT(Opt_uid) || fsi->opts & BIT(Opt_gid))) { update_uid = fsi->opts & BIT(Opt_uid); update_gid = fsi->opts & BIT(Opt_gid); rcu_read_lock(); list_for_each_entry_rcu(ti, &tracefs_inodes, list) { if (update_uid) { ti->flags &= ~TRACEFS_UID_PERM_SET; ti->vfs_inode.i_uid = fsi->uid; } if (update_gid) { ti->flags &= ~TRACEFS_GID_PERM_SET; ti->vfs_inode.i_gid = fsi->gid; } /* * Note, the above ti->vfs_inode updates are * used in eventfs_remount() so they must come * before calling it. */ if (ti->flags & TRACEFS_EVENT_INODE) eventfs_remount(ti, update_uid, update_gid); } rcu_read_unlock(); } return 0; } static int tracefs_reconfigure(struct fs_context *fc) { struct super_block *sb = fc->root->d_sb; struct tracefs_fs_info *sb_opts = sb->s_fs_info; struct tracefs_fs_info *new_opts = fc->s_fs_info; if (!new_opts) return 0; sync_filesystem(sb); /* structure copy of new mount options to sb */ *sb_opts = *new_opts; return tracefs_apply_options(sb, true); } static int tracefs_show_options(struct seq_file *m, struct dentry *root) { struct tracefs_fs_info *fsi = root->d_sb->s_fs_info; if (!uid_eq(fsi->uid, GLOBAL_ROOT_UID)) seq_printf(m, ",uid=%u", from_kuid_munged(&init_user_ns, fsi->uid)); if (!gid_eq(fsi->gid, GLOBAL_ROOT_GID)) seq_printf(m, ",gid=%u", from_kgid_munged(&init_user_ns, fsi->gid)); if (fsi->mode != TRACEFS_DEFAULT_MODE) seq_printf(m, ",mode=%o", fsi->mode); return 0; } static int tracefs_drop_inode(struct inode *inode) { struct tracefs_inode *ti = get_tracefs(inode); /* * This inode is being freed and cannot be used for * eventfs. Clear the flag so that it doesn't call into * eventfs during the remount flag updates. The eventfs_inode * gets freed after an RCU cycle, so the content will still * be safe if the iteration is going on now. 
*/ ti->flags &= ~TRACEFS_EVENT_INODE; return 1; } static const struct super_operations tracefs_super_operations = { .alloc_inode = tracefs_alloc_inode, .free_inode = tracefs_free_inode, .destroy_inode = tracefs_destroy_inode, .drop_inode = tracefs_drop_inode, .statfs = simple_statfs, .show_options = tracefs_show_options, }; /* * It would be cleaner if eventfs had its own dentry ops. * * Note that d_revalidate is called potentially under RCU, * so it can't take the eventfs mutex etc. It's fine - if * we open a file just as it's marked dead, things will * still work just fine, and just see the old stale case. */ static void tracefs_d_release(struct dentry *dentry) { if (dentry->d_fsdata) eventfs_d_release(dentry); } static int tracefs_d_revalidate(struct dentry *dentry, unsigned int flags) { struct eventfs_inode *ei = dentry->d_fsdata; return !(ei && ei->is_freed); } static const struct dentry_operations tracefs_dentry_operations = { .d_revalidate = tracefs_d_revalidate, .d_release = tracefs_d_release, }; static int tracefs_fill_super(struct super_block *sb, struct fs_context *fc) { static const struct tree_descr trace_files[] = {{""}}; int err; err = simple_fill_super(sb, TRACEFS_MAGIC, trace_files); if (err) return err; sb->s_op = &tracefs_super_operations; sb->s_d_op = &tracefs_dentry_operations; return 0; } static int tracefs_get_tree(struct fs_context *fc) { int err = get_tree_single(fc, tracefs_fill_super); if (err) return err; return tracefs_reconfigure(fc); } static void tracefs_free_fc(struct fs_context *fc) { kfree(fc->s_fs_info); } static const struct fs_context_operations tracefs_context_ops = { .free = tracefs_free_fc, .parse_param = tracefs_parse_param, .get_tree = tracefs_get_tree, .reconfigure = tracefs_reconfigure, }; static int tracefs_init_fs_context(struct fs_context *fc) { struct tracefs_fs_info *fsi; fsi = kzalloc(sizeof(struct tracefs_fs_info), GFP_KERNEL); if (!fsi) return -ENOMEM; fsi->mode = TRACEFS_DEFAULT_MODE; fc->s_fs_info = fsi; fc->ops = &tracefs_context_ops; return 0; } static struct file_system_type trace_fs_type = { .owner = THIS_MODULE, .name = "tracefs", .init_fs_context = tracefs_init_fs_context, .parameters = tracefs_param_specs, .kill_sb = kill_litter_super, }; MODULE_ALIAS_FS("tracefs"); struct dentry *tracefs_start_creating(const char *name, struct dentry *parent) { struct dentry *dentry; int error; pr_debug("tracefs: creating file '%s'\n",name); error = simple_pin_fs(&trace_fs_type, &tracefs_mount, &tracefs_mount_count); if (error) return ERR_PTR(error); /* If the parent is not specified, we create it in the root. * We need the root dentry to do this, which is in the super * block. A pointer to that is in the struct vfsmount that we * have around. 
*/ if (!parent) parent = tracefs_mount->mnt_root; inode_lock(d_inode(parent)); if (unlikely(IS_DEADDIR(d_inode(parent)))) dentry = ERR_PTR(-ENOENT); else dentry = lookup_one_len(name, parent, strlen(name)); if (!IS_ERR(dentry) && d_inode(dentry)) { dput(dentry); dentry = ERR_PTR(-EEXIST); } if (IS_ERR(dentry)) { inode_unlock(d_inode(parent)); simple_release_fs(&tracefs_mount, &tracefs_mount_count); } return dentry; } struct dentry *tracefs_failed_creating(struct dentry *dentry) { inode_unlock(d_inode(dentry->d_parent)); dput(dentry); simple_release_fs(&tracefs_mount, &tracefs_mount_count); return NULL; } struct dentry *tracefs_end_creating(struct dentry *dentry) { inode_unlock(d_inode(dentry->d_parent)); return dentry; } /* Find the inode that this will use for default */ static struct inode *instance_inode(struct dentry *parent, struct inode *inode) { struct tracefs_inode *ti; /* If parent is NULL then use root inode */ if (!parent) return d_inode(inode->i_sb->s_root); /* Find the inode that is flagged as an instance or the root inode */ while (!IS_ROOT(parent)) { ti = get_tracefs(d_inode(parent)); if (ti->flags & TRACEFS_INSTANCE_INODE) break; parent = parent->d_parent; } return d_inode(parent); } /** * tracefs_create_file - create a file in the tracefs filesystem * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * file will be created in the root of the tracefs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * * This is the basic "create a file" function for tracefs. It allows for a * wide range of flexibility in creating a file, or a directory (if you want * to create a directory, the tracefs_create_dir() function is * recommended to be used instead.) * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the tracefs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here.) If an error occurs, %NULL will be returned. * * If tracefs is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *tracefs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct tracefs_inode *ti; struct dentry *dentry; struct inode *inode; if (security_locked_down(LOCKDOWN_TRACEFS)) return NULL; if (!(mode & S_IFMT)) mode |= S_IFREG; BUG_ON(!S_ISREG(mode)); dentry = tracefs_start_creating(name, parent); if (IS_ERR(dentry)) return NULL; inode = tracefs_get_inode(dentry->d_sb); if (unlikely(!inode)) return tracefs_failed_creating(dentry); ti = get_tracefs(inode); ti->private = instance_inode(parent, inode); inode->i_mode = mode; inode->i_op = &tracefs_file_inode_operations; inode->i_fop = fops ? 
fops : &tracefs_file_operations; inode->i_private = data; inode->i_uid = d_inode(dentry->d_parent)->i_uid; inode->i_gid = d_inode(dentry->d_parent)->i_gid; d_instantiate(dentry, inode); fsnotify_create(d_inode(dentry->d_parent), dentry); return tracefs_end_creating(dentry); } static struct dentry *__create_dir(const char *name, struct dentry *parent, const struct inode_operations *ops) { struct tracefs_inode *ti; struct dentry *dentry = tracefs_start_creating(name, parent); struct inode *inode; if (IS_ERR(dentry)) return NULL; inode = tracefs_get_inode(dentry->d_sb); if (unlikely(!inode)) return tracefs_failed_creating(dentry); /* Do not set bits for OTH */ inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP; inode->i_op = ops; inode->i_fop = &simple_dir_operations; inode->i_uid = d_inode(dentry->d_parent)->i_uid; inode->i_gid = d_inode(dentry->d_parent)->i_gid; ti = get_tracefs(inode); ti->private = instance_inode(parent, inode); /* directory inodes start off with i_nlink == 2 (for "." entry) */ inc_nlink(inode); d_instantiate(dentry, inode); inc_nlink(d_inode(dentry->d_parent)); fsnotify_mkdir(d_inode(dentry->d_parent), dentry); return tracefs_end_creating(dentry); } /** * tracefs_create_dir - create a directory in the tracefs filesystem * @name: a pointer to a string containing the name of the directory to * create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is NULL, then the * directory will be created in the root of the tracefs filesystem. * * This function creates a directory in tracefs with the given name. * * This function will return a pointer to a dentry if it succeeds. This * pointer must be passed to the tracefs_remove() function when the file is * to be removed. If an error occurs, %NULL will be returned. * * If tracing is not enabled in the kernel, the value -%ENODEV will be * returned. */ struct dentry *tracefs_create_dir(const char *name, struct dentry *parent) { if (security_locked_down(LOCKDOWN_TRACEFS)) return NULL; return __create_dir(name, parent, &tracefs_dir_inode_operations); } /** * tracefs_create_instance_dir - create the tracing instances directory * @name: The name of the instances directory to create * @parent: The parent directory that the instances directory will exist * @mkdir: The function to call when a mkdir is performed. * @rmdir: The function to call when a rmdir is performed. * * Only one instances directory is allowed. * * The instances directory is special as it allows for mkdir and rmdir * to be done by userspace. When a mkdir or rmdir is performed, the inode * locks are released and the methods passed in (@mkdir and @rmdir) are * called without locks and with the name of the directory being created * within the instances directory. * * Returns the dentry of the instances directory. */ __init struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent, int (*mkdir)(const char *name), int (*rmdir)(const char *name)) { struct dentry *dentry; /* Only allow one instance of the instances directory. 
*/ if (WARN_ON(tracefs_ops.mkdir || tracefs_ops.rmdir)) return NULL; dentry = __create_dir(name, parent, &tracefs_instance_dir_inode_operations); if (!dentry) return NULL; tracefs_ops.mkdir = mkdir; tracefs_ops.rmdir = rmdir; return dentry; } static void remove_one(struct dentry *victim) { simple_release_fs(&tracefs_mount, &tracefs_mount_count); } /** * tracefs_remove - recursively removes a directory * @dentry: a pointer to a the dentry of the directory to be removed. * * This function recursively removes a directory tree in tracefs that * was previously created with a call to another tracefs function * (like tracefs_create_file() or variants thereof.) */ void tracefs_remove(struct dentry *dentry) { if (IS_ERR_OR_NULL(dentry)) return; simple_pin_fs(&trace_fs_type, &tracefs_mount, &tracefs_mount_count); simple_recursive_removal(dentry, remove_one); simple_release_fs(&tracefs_mount, &tracefs_mount_count); } /** * tracefs_initialized - Tells whether tracefs has been registered */ bool tracefs_initialized(void) { return tracefs_registered; } static void init_once(void *foo) { struct tracefs_inode *ti = (struct tracefs_inode *) foo; /* inode_init_once() calls memset() on the vfs_inode portion */ inode_init_once(&ti->vfs_inode); /* Zero out the rest */ memset_after(ti, 0, vfs_inode); } static int __init tracefs_init(void) { int retval; tracefs_inode_cachep = kmem_cache_create("tracefs_inode_cache", sizeof(struct tracefs_inode), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_ACCOUNT), init_once); if (!tracefs_inode_cachep) return -ENOMEM; retval = sysfs_create_mount_point(kernel_kobj, "tracing"); if (retval) return -EINVAL; retval = register_filesystem(&trace_fs_type); if (!retval) tracefs_registered = true; return retval; } core_initcall(tracefs_init);
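As a usage note for the API implemented above, the sketch below shows how an in-kernel user might create a directory and a read-only file under tracefs and remove them again. It is a hedged, self-contained illustration: "my_dir", "my_file" and the message text are made-up names, and in practice tracefs entries are created by the tracing subsystem itself rather than by arbitrary modules.

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/tracefs.h>

static struct dentry *my_dir;
static struct dentry *my_file;

static ssize_t my_read(struct file *file, char __user *ubuf,
		       size_t count, loff_t *ppos)
{
	static const char msg[] = "hello from tracefs\n";

	return simple_read_from_buffer(ubuf, count, ppos, msg, sizeof(msg) - 1);
}

static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= my_read,
	.llseek	= default_llseek,
};

static int __init my_init(void)
{
	/* A NULL parent places the directory in the tracefs root. */
	my_dir = tracefs_create_dir("my_dir", NULL);
	if (!my_dir)
		return -ENOMEM;

	/* @data ends up in inode->i_private; unused here, so NULL. */
	my_file = tracefs_create_file("my_file", 0444, my_dir, NULL, &my_fops);
	if (!my_file) {
		tracefs_remove(my_dir);
		return -ENOMEM;
	}

	return 0;
}

static void __exit my_exit(void)
{
	/* tracefs_remove() is recursive, so this also drops my_file. */
	tracefs_remove(my_dir);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");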
3696 3697 3698 3699 3700 3701 3702 3703 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 3714 3715 3716 3717 3718 3719 3720 3721 3722 3723 3724 3725 3726 3727 3728 3729 3730 3731 3732 3733 3734 3735 3736 3737 3738 3739 3740 3741 3742 3743 3744 3745 3746 3747 3748 3749 3750 3751 3752 3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3774 3775 3776 3777 3778 3779 3780 3781 3782 3783 3784 3785 3786 3787 3788 3789 3790 3791 3792 3793 3794 3795 3796 3797 3798 3799 3800 3801 3802 3803 3804 3805 3806 3807 3808 3809 3810 3811 3812 3813 3814 3815 3816 3817 3818 3819 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3836 3837 3838 3839 3840 3841 3842 3843 3844 3845 3846 3847 3848 3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866 3867 3868 3869 3870 3871 3872 3873 3874 3875 3876 3877 3878 3879 3880 3881 3882 3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917 3918 3919 3920 3921 3922 3923 3924 3925 3926 3927 3928 3929 3930 3931 3932 3933 3934 3935 3936 3937 3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951 3952 3953 3954 3955 3956 3957 3958 3959 3960 3961 3962 3963 3964 3965 3966 3967 3968 3969 3970 3971 3972 3973 3974 3975 3976 3977 3978 3979 3980 3981 3982 3983 3984 3985 3986 3987 3988 3989 3990 3991 3992 3993 3994 3995 3996 3997 3998 3999 4000 4001 4002 4003 4004 4005 4006 4007 4008 4009 4010 4011 4012 4013 4014 4015 4016 4017 4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035 4036 4037 4038 4039 4040 4041 4042 4043 4044 4045 4046 4047 4048 4049 4050 4051 4052 4053 4054 4055 4056 4057 4058 4059 4060 4061 4062 4063 4064 4065 4066 4067 4068 4069 4070 4071 4072 4073 4074 4075 4076 4077 4078 4079 4080 4081 4082 4083 4084 4085 4086 4087 4088 4089 4090 4091 4092 4093 4094 4095 4096 4097 4098 4099 4100 4101 4102 4103 4104 4105 4106 4107 4108 4109 4110 4111 4112 4113 4114 4115 4116 4117 4118 4119 4120 4121 4122 4123 4124 4125 4126 4127 4128 4129 4130 4131 4132 4133 4134 4135 4136 4137 4138 4139 4140 4141 4142 4143 4144 4145 4146 4147 4148 4149 4150 4151 4152 4153 4154 4155 4156 4157 4158 4159 4160 4161 4162 4163 4164 4165 4166 4167 4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178 4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189 4190 4191 4192 4193 4194 4195 4196 4197 4198 4199 4200 4201 4202 4203 4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218 4219 4220 4221 4222 4223 4224 4225 4226 4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271 4272 4273 4274 4275 4276 4277 4278 4279 4280 4281 4282 4283 4284 4285 4286 4287 4288 4289 4290 4291 4292 4293 4294 4295 4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4318 4319 4320 4321 4322 4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344 4345 4346 4347 4348 4349 4350 4351 4352 4353 4354 4355 4356 4357 4358 4359 4360 4361 4362 4363 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4380 4381 4382 4383 4384 4385 4386 4387 4388 4389 4390 4391 4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 /* * Copyright 
2002-2005, Instant802 Networks, Inc. * Copyright 2005-2006, Devicescape Software, Inc. * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2008-2011 Luis R. Rodriguez <mcgrof@qca.qualcomm.com> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2024 Intel Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /** * DOC: Wireless regulatory infrastructure * * The usual implementation is for a driver to read a device EEPROM to * determine which regulatory domain it should be operating under, then * looking up the allowable channels in a driver-local table and finally * registering those channels in the wiphy structure. * * Another set of compliance enforcement is for drivers to use their * own compliance limits which can be stored on the EEPROM. The host * driver or firmware may ensure these are used. * * In addition to all this we provide an extra layer of regulatory * conformance. For drivers which do not have any regulatory * information CRDA provides the complete regulatory solution. * For others it provides a community effort on further restrictions * to enhance compliance. * * Note: When number of rules --> infinity we will not be able to * index on alpha2 any more, instead we'll probably have to * rely on some SHA1 checksum of the regdomain for example. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/ctype.h> #include <linux/nl80211.h> #include <linux/platform_device.h> #include <linux/verification.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/units.h> #include <net/cfg80211.h> #include "core.h" #include "reg.h" #include "rdev-ops.h" #include "nl80211.h" /* * Grace period we give before making sure all current interfaces reside on * channels allowed by the current regulatory domain. */ #define REG_ENFORCE_GRACE_MS 60000 /** * enum reg_request_treatment - regulatory request treatment * * @REG_REQ_OK: continue processing the regulatory request * @REG_REQ_IGNORE: ignore the regulatory request * @REG_REQ_INTERSECT: the regulatory domain resulting from this request should * be intersected with the current one. * @REG_REQ_ALREADY_SET: the regulatory request will not change the current * regulatory settings, and no further processing is required. 
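 *
 * Note: an intersected domain produced for REG_REQ_INTERSECT is built by
 * regdom_intersect() further down and carries the special "98" alpha2,
 * since no single country code can describe the result.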
*/ enum reg_request_treatment { REG_REQ_OK, REG_REQ_IGNORE, REG_REQ_INTERSECT, REG_REQ_ALREADY_SET, }; static struct regulatory_request core_request_world = { .initiator = NL80211_REGDOM_SET_BY_CORE, .alpha2[0] = '0', .alpha2[1] = '0', .intersect = false, .processed = true, .country_ie_env = ENVIRON_ANY, }; /* * Receipt of information from last regulatory request, * protected by RTNL (and can be accessed with RCU protection) */ static struct regulatory_request __rcu *last_request = (void __force __rcu *)&core_request_world; /* To trigger userspace events and load firmware */ static struct platform_device *reg_pdev; /* * Central wireless core regulatory domains, we only need two, * the current one and a world regulatory domain in case we have no * information to give us an alpha2. * (protected by RTNL, can be read under RCU) */ const struct ieee80211_regdomain __rcu *cfg80211_regdomain; /* * Number of devices that registered to the core * that support cellular base station regulatory hints * (protected by RTNL) */ static int reg_num_devs_support_basehint; /* * State variable indicating if the platform on which the devices * are attached is operating in an indoor environment. The state variable * is relevant for all registered devices. */ static bool reg_is_indoor; static DEFINE_SPINLOCK(reg_indoor_lock); /* Used to track the userspace process controlling the indoor setting */ static u32 reg_is_indoor_portid; static void restore_regulatory_settings(bool reset_user, bool cached); static void print_regdomain(const struct ieee80211_regdomain *rd); static void reg_process_hint(struct regulatory_request *reg_request); static const struct ieee80211_regdomain *get_cfg80211_regdom(void) { return rcu_dereference_rtnl(cfg80211_regdomain); } /* * Returns the regulatory domain associated with the wiphy. * * Requires any of RTNL, wiphy mutex or RCU protection. 
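 *
 * A minimal caller sketch (illustration only, not taken from this file):
 *
 *	rcu_read_lock();
 *	regd = get_wiphy_regdom(wiphy);
 *	if (regd)
 *		/* ... use regd under RCU protection ... */
 *	rcu_read_unlock();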
*/ const struct ieee80211_regdomain *get_wiphy_regdom(struct wiphy *wiphy) { return rcu_dereference_check(wiphy->regd, lockdep_is_held(&wiphy->mtx) || lockdep_rtnl_is_held()); } EXPORT_SYMBOL(get_wiphy_regdom); static const char *reg_dfs_region_str(enum nl80211_dfs_regions dfs_region) { switch (dfs_region) { case NL80211_DFS_UNSET: return "unset"; case NL80211_DFS_FCC: return "FCC"; case NL80211_DFS_ETSI: return "ETSI"; case NL80211_DFS_JP: return "JP"; } return "Unknown"; } enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy) { const struct ieee80211_regdomain *regd = NULL; const struct ieee80211_regdomain *wiphy_regd = NULL; enum nl80211_dfs_regions dfs_region; rcu_read_lock(); regd = get_cfg80211_regdom(); dfs_region = regd->dfs_region; if (!wiphy) goto out; wiphy_regd = get_wiphy_regdom(wiphy); if (!wiphy_regd) goto out; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { dfs_region = wiphy_regd->dfs_region; goto out; } if (wiphy_regd->dfs_region == regd->dfs_region) goto out; pr_debug("%s: device specific dfs_region (%s) disagrees with cfg80211's central dfs_region (%s)\n", dev_name(&wiphy->dev), reg_dfs_region_str(wiphy_regd->dfs_region), reg_dfs_region_str(regd->dfs_region)); out: rcu_read_unlock(); return dfs_region; } static void rcu_free_regdom(const struct ieee80211_regdomain *r) { if (!r) return; kfree_rcu((struct ieee80211_regdomain *)r, rcu_head); } static struct regulatory_request *get_last_request(void) { return rcu_dereference_rtnl(last_request); } /* Used to queue up regulatory hints */ static LIST_HEAD(reg_requests_list); static DEFINE_SPINLOCK(reg_requests_lock); /* Used to queue up beacon hints for review */ static LIST_HEAD(reg_pending_beacons); static DEFINE_SPINLOCK(reg_pending_beacons_lock); /* Used to keep track of processed beacon hints */ static LIST_HEAD(reg_beacon_list); struct reg_beacon { struct list_head list; struct ieee80211_channel chan; }; static void reg_check_chans_work(struct work_struct *work); static DECLARE_DELAYED_WORK(reg_check_chans, reg_check_chans_work); static void reg_todo(struct work_struct *work); static DECLARE_WORK(reg_work, reg_todo); /* We keep a static world regulatory domain in case of the absence of CRDA */ static const struct ieee80211_regdomain world_regdom = { .n_reg_rules = 8, .alpha2 = "00", .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), /* IEEE 802.11b/g, channels 12..13. 
*/ REG_RULE(2467-10, 2472+10, 20, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW), /* IEEE 802.11 channel 14 - Only JP enables * this and for 802.11b only */ REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM), /* IEEE 802.11a, channel 36..48 */ REG_RULE(5180-10, 5240+10, 80, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW), /* IEEE 802.11a, channel 52..64 - DFS required */ REG_RULE(5260-10, 5320+10, 80, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW | NL80211_RRF_DFS), /* IEEE 802.11a, channel 100..144 - DFS required */ REG_RULE(5500-10, 5720+10, 160, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_DFS), /* IEEE 802.11a, channel 149..165 */ REG_RULE(5745-10, 5825+10, 80, 6, 20, NL80211_RRF_NO_IR), /* IEEE 802.11ad (60GHz), channels 1..3 */ REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0), } }; /* protected by RTNL */ static const struct ieee80211_regdomain *cfg80211_world_regdom = &world_regdom; static char *ieee80211_regdom = "00"; static char user_alpha2[2]; static const struct ieee80211_regdomain *cfg80211_user_regdom; module_param(ieee80211_regdom, charp, 0444); MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code"); static void reg_free_request(struct regulatory_request *request) { if (request == &core_request_world) return; if (request != get_last_request()) kfree(request); } static void reg_free_last_request(void) { struct regulatory_request *lr = get_last_request(); if (lr != &core_request_world && lr) kfree_rcu(lr, rcu_head); } static void reg_update_last_request(struct regulatory_request *request) { struct regulatory_request *lr; lr = get_last_request(); if (lr == request) return; reg_free_last_request(); rcu_assign_pointer(last_request, request); } static void reset_regdomains(bool full_reset, const struct ieee80211_regdomain *new_regdom) { const struct ieee80211_regdomain *r; ASSERT_RTNL(); r = get_cfg80211_regdom(); /* avoid freeing static information or freeing something twice */ if (r == cfg80211_world_regdom) r = NULL; if (cfg80211_world_regdom == &world_regdom) cfg80211_world_regdom = NULL; if (r == &world_regdom) r = NULL; rcu_free_regdom(r); rcu_free_regdom(cfg80211_world_regdom); cfg80211_world_regdom = &world_regdom; rcu_assign_pointer(cfg80211_regdomain, new_regdom); if (!full_reset) return; reg_update_last_request(&core_request_world); } /* * Dynamic world regulatory domain requested by the wireless * core upon initialization */ static void update_world_regdomain(const struct ieee80211_regdomain *rd) { struct regulatory_request *lr; lr = get_last_request(); WARN_ON(!lr); reset_regdomains(false, rd); cfg80211_world_regdom = rd; } bool is_world_regdom(const char *alpha2) { if (!alpha2) return false; return alpha2[0] == '0' && alpha2[1] == '0'; } static bool is_alpha2_set(const char *alpha2) { if (!alpha2) return false; return alpha2[0] && alpha2[1]; } static bool is_unknown_alpha2(const char *alpha2) { if (!alpha2) return false; /* * Special case where regulatory domain was built by driver * but a specific alpha2 cannot be determined */ return alpha2[0] == '9' && alpha2[1] == '9'; } static bool is_intersected_alpha2(const char *alpha2) { if (!alpha2) return false; /* * Special case where regulatory domain is the * result of an intersection between two regulatory domain * structures */ return alpha2[0] == '9' && alpha2[1] == '8'; } static bool is_an_alpha2(const char *alpha2) { if (!alpha2) return false; return isalpha(alpha2[0]) && isalpha(alpha2[1]); } static bool alpha2_equal(const char *alpha2_x, const char *alpha2_y) 
{ if (!alpha2_x || !alpha2_y) return false; return alpha2_x[0] == alpha2_y[0] && alpha2_x[1] == alpha2_y[1]; } static bool regdom_changes(const char *alpha2) { const struct ieee80211_regdomain *r = get_cfg80211_regdom(); if (!r) return true; return !alpha2_equal(r->alpha2, alpha2); } /* * The NL80211_REGDOM_SET_BY_USER regdom alpha2 is cached, this lets * you know if a valid regulatory hint with NL80211_REGDOM_SET_BY_USER * has ever been issued. */ static bool is_user_regdom_saved(void) { if (user_alpha2[0] == '9' && user_alpha2[1] == '7') return false; /* This would indicate a mistake on the design */ if (WARN(!is_world_regdom(user_alpha2) && !is_an_alpha2(user_alpha2), "Unexpected user alpha2: %c%c\n", user_alpha2[0], user_alpha2[1])) return false; return true; } static const struct ieee80211_regdomain * reg_copy_regd(const struct ieee80211_regdomain *src_regd) { struct ieee80211_regdomain *regd; unsigned int i; regd = kzalloc(struct_size(regd, reg_rules, src_regd->n_reg_rules), GFP_KERNEL); if (!regd) return ERR_PTR(-ENOMEM); memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain)); for (i = 0; i < src_regd->n_reg_rules; i++) memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i], sizeof(struct ieee80211_reg_rule)); return regd; } static void cfg80211_save_user_regdom(const struct ieee80211_regdomain *rd) { ASSERT_RTNL(); if (!IS_ERR(cfg80211_user_regdom)) kfree(cfg80211_user_regdom); cfg80211_user_regdom = reg_copy_regd(rd); } struct reg_regdb_apply_request { struct list_head list; const struct ieee80211_regdomain *regdom; }; static LIST_HEAD(reg_regdb_apply_list); static DEFINE_MUTEX(reg_regdb_apply_mutex); static void reg_regdb_apply(struct work_struct *work) { struct reg_regdb_apply_request *request; rtnl_lock(); mutex_lock(&reg_regdb_apply_mutex); while (!list_empty(&reg_regdb_apply_list)) { request = list_first_entry(&reg_regdb_apply_list, struct reg_regdb_apply_request, list); list_del(&request->list); set_regdom(request->regdom, REGD_SOURCE_INTERNAL_DB); kfree(request); } mutex_unlock(&reg_regdb_apply_mutex); rtnl_unlock(); } static DECLARE_WORK(reg_regdb_work, reg_regdb_apply); static int reg_schedule_apply(const struct ieee80211_regdomain *regdom) { struct reg_regdb_apply_request *request; request = kzalloc(sizeof(struct reg_regdb_apply_request), GFP_KERNEL); if (!request) { kfree(regdom); return -ENOMEM; } request->regdom = regdom; mutex_lock(&reg_regdb_apply_mutex); list_add_tail(&request->list, &reg_regdb_apply_list); mutex_unlock(&reg_regdb_apply_mutex); schedule_work(&reg_regdb_work); return 0; } #ifdef CONFIG_CFG80211_CRDA_SUPPORT /* Max number of consecutive attempts to communicate with CRDA */ #define REG_MAX_CRDA_TIMEOUTS 10 static u32 reg_crda_timeouts; static void crda_timeout_work(struct work_struct *work); static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work); static void crda_timeout_work(struct work_struct *work) { pr_debug("Timeout while waiting for CRDA to reply, restoring regulatory settings\n"); rtnl_lock(); reg_crda_timeouts++; restore_regulatory_settings(true, false); rtnl_unlock(); } static void cancel_crda_timeout(void) { cancel_delayed_work(&crda_timeout); } static void cancel_crda_timeout_sync(void) { cancel_delayed_work_sync(&crda_timeout); } static void reset_crda_timeouts(void) { reg_crda_timeouts = 0; } /* * This lets us keep regulatory code which is updated on a regulatory * basis in userspace. 
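 *
 * Concretely, call_crda() below emits a change uevent carrying a
 * COUNTRY=XX environment variable; userspace (typically a udev rule that
 * runs CRDA) is expected to answer via nl80211 with the rules for that
 * alpha2. The exact udev hookup is a distribution detail and is only
 * assumed here, e.g.:
 *
 *	KERNEL=="regulatory*", ACTION=="change", RUN+="/sbin/crda"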
*/ static int call_crda(const char *alpha2) { char country[12]; char *env[] = { country, NULL }; int ret; snprintf(country, sizeof(country), "COUNTRY=%c%c", alpha2[0], alpha2[1]); if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n"); return -EINVAL; } if (!is_world_regdom((char *) alpha2)) pr_debug("Calling CRDA for country: %c%c\n", alpha2[0], alpha2[1]); else pr_debug("Calling CRDA to update world regulatory domain\n"); ret = kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env); if (ret) return ret; queue_delayed_work(system_power_efficient_wq, &crda_timeout, msecs_to_jiffies(3142)); return 0; } #else static inline void cancel_crda_timeout(void) {} static inline void cancel_crda_timeout_sync(void) {} static inline void reset_crda_timeouts(void) {} static inline int call_crda(const char *alpha2) { return -ENODATA; } #endif /* CONFIG_CFG80211_CRDA_SUPPORT */ /* code to directly load a firmware database through request_firmware */ static const struct fwdb_header *regdb; struct fwdb_country { u8 alpha2[2]; __be16 coll_ptr; /* this struct cannot be extended */ } __packed __aligned(4); struct fwdb_collection { u8 len; u8 n_rules; u8 dfs_region; /* no optional data yet */ /* aligned to 2, then followed by __be16 array of rule pointers */ } __packed __aligned(4); enum fwdb_flags { FWDB_FLAG_NO_OFDM = BIT(0), FWDB_FLAG_NO_OUTDOOR = BIT(1), FWDB_FLAG_DFS = BIT(2), FWDB_FLAG_NO_IR = BIT(3), FWDB_FLAG_AUTO_BW = BIT(4), }; struct fwdb_wmm_ac { u8 ecw; u8 aifsn; __be16 cot; } __packed; struct fwdb_wmm_rule { struct fwdb_wmm_ac client[IEEE80211_NUM_ACS]; struct fwdb_wmm_ac ap[IEEE80211_NUM_ACS]; } __packed; struct fwdb_rule { u8 len; u8 flags; __be16 max_eirp; __be32 start, end, max_bw; /* start of optional data */ __be16 cac_timeout; __be16 wmm_ptr; } __packed __aligned(4); #define FWDB_MAGIC 0x52474442 #define FWDB_VERSION 20 struct fwdb_header { __be32 magic; __be32 version; struct fwdb_country country[]; } __packed __aligned(4); static int ecw2cw(int ecw) { return (1 << ecw) - 1; } static bool valid_wmm(struct fwdb_wmm_rule *rule) { struct fwdb_wmm_ac *ac = (struct fwdb_wmm_ac *)rule; int i; for (i = 0; i < IEEE80211_NUM_ACS * 2; i++) { u16 cw_min = ecw2cw((ac[i].ecw & 0xf0) >> 4); u16 cw_max = ecw2cw(ac[i].ecw & 0x0f); u8 aifsn = ac[i].aifsn; if (cw_min >= cw_max) return false; if (aifsn < 1) return false; } return true; } static bool valid_rule(const u8 *data, unsigned int size, u16 rule_ptr) { struct fwdb_rule *rule = (void *)(data + (rule_ptr << 2)); if ((u8 *)rule + sizeof(rule->len) > data + size) return false; /* mandatory fields */ if (rule->len < offsetofend(struct fwdb_rule, max_bw)) return false; if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; struct fwdb_wmm_rule *wmm; if (wmm_ptr + sizeof(struct fwdb_wmm_rule) > size) return false; wmm = (void *)(data + wmm_ptr); if (!valid_wmm(wmm)) return false; } return true; } static bool valid_country(const u8 *data, unsigned int size, const struct fwdb_country *country) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)(data + ptr); __be16 *rules_ptr; unsigned int i; /* make sure we can read len/n_rules */ if ((u8 *)coll + offsetofend(typeof(*coll), n_rules) > data + size) return false; /* make sure base struct and all rules fit */ if ((u8 *)coll + ALIGN(coll->len, 2) + (coll->n_rules * 2) > data + size) return false; /* mandatory fields must exist */ if (coll->len < 
offsetofend(struct fwdb_collection, dfs_region)) return false; rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); for (i = 0; i < coll->n_rules; i++) { u16 rule_ptr = be16_to_cpu(rules_ptr[i]); if (!valid_rule(data, size, rule_ptr)) return false; } return true; } #ifdef CONFIG_CFG80211_REQUIRE_SIGNED_REGDB #include <keys/asymmetric-type.h> static struct key *builtin_regdb_keys; static int __init load_builtin_regdb_keys(void) { builtin_regdb_keys = keyring_alloc(".builtin_regdb_keys", KUIDT_INIT(0), KGIDT_INIT(0), current_cred(), ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH), KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(builtin_regdb_keys)) return PTR_ERR(builtin_regdb_keys); pr_notice("Loading compiled-in X.509 certificates for regulatory database\n"); #ifdef CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS x509_load_certificate_list(shipped_regdb_certs, shipped_regdb_certs_len, builtin_regdb_keys); #endif #ifdef CONFIG_CFG80211_EXTRA_REGDB_KEYDIR if (CONFIG_CFG80211_EXTRA_REGDB_KEYDIR[0] != '\0') x509_load_certificate_list(extra_regdb_certs, extra_regdb_certs_len, builtin_regdb_keys); #endif return 0; } MODULE_FIRMWARE("regulatory.db.p7s"); static bool regdb_has_valid_signature(const u8 *data, unsigned int size) { const struct firmware *sig; bool result; if (request_firmware(&sig, "regulatory.db.p7s", &reg_pdev->dev)) return false; result = verify_pkcs7_signature(data, size, sig->data, sig->size, builtin_regdb_keys, VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL) == 0; release_firmware(sig); return result; } static void free_regdb_keyring(void) { key_put(builtin_regdb_keys); } #else static int load_builtin_regdb_keys(void) { return 0; } static bool regdb_has_valid_signature(const u8 *data, unsigned int size) { return true; } static void free_regdb_keyring(void) { } #endif /* CONFIG_CFG80211_REQUIRE_SIGNED_REGDB */ static bool valid_regdb(const u8 *data, unsigned int size) { const struct fwdb_header *hdr = (void *)data; const struct fwdb_country *country; if (size < sizeof(*hdr)) return false; if (hdr->magic != cpu_to_be32(FWDB_MAGIC)) return false; if (hdr->version != cpu_to_be32(FWDB_VERSION)) return false; if (!regdb_has_valid_signature(data, size)) return false; country = &hdr->country[0]; while ((u8 *)(country + 1) <= data + size) { if (!country->coll_ptr) break; if (!valid_country(data, size, country)) return false; country++; } return true; } static void set_wmm_rule(const struct fwdb_header *db, const struct fwdb_country *country, const struct fwdb_rule *rule, struct ieee80211_reg_rule *rrule) { struct ieee80211_wmm_rule *wmm_rule = &rrule->wmm_rule; struct fwdb_wmm_rule *wmm; unsigned int i, wmm_ptr; wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; wmm = (void *)((u8 *)db + wmm_ptr); if (!valid_wmm(wmm)) { pr_err("Invalid regulatory WMM rule %u-%u in domain %c%c\n", be32_to_cpu(rule->start), be32_to_cpu(rule->end), country->alpha2[0], country->alpha2[1]); return; } for (i = 0; i < IEEE80211_NUM_ACS; i++) { wmm_rule->client[i].cw_min = ecw2cw((wmm->client[i].ecw & 0xf0) >> 4); wmm_rule->client[i].cw_max = ecw2cw(wmm->client[i].ecw & 0x0f); wmm_rule->client[i].aifsn = wmm->client[i].aifsn; wmm_rule->client[i].cot = 1000 * be16_to_cpu(wmm->client[i].cot); wmm_rule->ap[i].cw_min = ecw2cw((wmm->ap[i].ecw & 0xf0) >> 4); wmm_rule->ap[i].cw_max = ecw2cw(wmm->ap[i].ecw & 0x0f); wmm_rule->ap[i].aifsn = wmm->ap[i].aifsn; wmm_rule->ap[i].cot = 1000 * be16_to_cpu(wmm->ap[i].cot); } rrule->has_wmm = true; } static int __regdb_query_wmm(const struct fwdb_header *db, const 
struct fwdb_country *country, int freq, struct ieee80211_reg_rule *rrule) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); int i; for (i = 0; i < coll->n_rules; i++) { __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2; struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr); if (rule->len < offsetofend(struct fwdb_rule, wmm_ptr)) continue; if (freq >= KHZ_TO_MHZ(be32_to_cpu(rule->start)) && freq <= KHZ_TO_MHZ(be32_to_cpu(rule->end))) { set_wmm_rule(db, country, rule, rrule); return 0; } } return -ENODATA; } int reg_query_regdb_wmm(char *alpha2, int freq, struct ieee80211_reg_rule *rule) { const struct fwdb_header *hdr = regdb; const struct fwdb_country *country; if (!regdb) return -ENODATA; if (IS_ERR(regdb)) return PTR_ERR(regdb); country = &hdr->country[0]; while (country->coll_ptr) { if (alpha2_equal(alpha2, country->alpha2)) return __regdb_query_wmm(regdb, country, freq, rule); country++; } return -ENODATA; } EXPORT_SYMBOL(reg_query_regdb_wmm); static int regdb_query_country(const struct fwdb_header *db, const struct fwdb_country *country) { unsigned int ptr = be16_to_cpu(country->coll_ptr) << 2; struct fwdb_collection *coll = (void *)((u8 *)db + ptr); struct ieee80211_regdomain *regdom; unsigned int i; regdom = kzalloc(struct_size(regdom, reg_rules, coll->n_rules), GFP_KERNEL); if (!regdom) return -ENOMEM; regdom->n_reg_rules = coll->n_rules; regdom->alpha2[0] = country->alpha2[0]; regdom->alpha2[1] = country->alpha2[1]; regdom->dfs_region = coll->dfs_region; for (i = 0; i < regdom->n_reg_rules; i++) { __be16 *rules_ptr = (void *)((u8 *)coll + ALIGN(coll->len, 2)); unsigned int rule_ptr = be16_to_cpu(rules_ptr[i]) << 2; struct fwdb_rule *rule = (void *)((u8 *)db + rule_ptr); struct ieee80211_reg_rule *rrule = &regdom->reg_rules[i]; rrule->freq_range.start_freq_khz = be32_to_cpu(rule->start); rrule->freq_range.end_freq_khz = be32_to_cpu(rule->end); rrule->freq_range.max_bandwidth_khz = be32_to_cpu(rule->max_bw); rrule->power_rule.max_antenna_gain = 0; rrule->power_rule.max_eirp = be16_to_cpu(rule->max_eirp); rrule->flags = 0; if (rule->flags & FWDB_FLAG_NO_OFDM) rrule->flags |= NL80211_RRF_NO_OFDM; if (rule->flags & FWDB_FLAG_NO_OUTDOOR) rrule->flags |= NL80211_RRF_NO_OUTDOOR; if (rule->flags & FWDB_FLAG_DFS) rrule->flags |= NL80211_RRF_DFS; if (rule->flags & FWDB_FLAG_NO_IR) rrule->flags |= NL80211_RRF_NO_IR; if (rule->flags & FWDB_FLAG_AUTO_BW) rrule->flags |= NL80211_RRF_AUTO_BW; rrule->dfs_cac_ms = 0; /* handle optional data */ if (rule->len >= offsetofend(struct fwdb_rule, cac_timeout)) rrule->dfs_cac_ms = 1000 * be16_to_cpu(rule->cac_timeout); if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) set_wmm_rule(db, country, rule, rrule); } return reg_schedule_apply(regdom); } static int query_regdb(const char *alpha2) { const struct fwdb_header *hdr = regdb; const struct fwdb_country *country; ASSERT_RTNL(); if (IS_ERR(regdb)) return PTR_ERR(regdb); country = &hdr->country[0]; while (country->coll_ptr) { if (alpha2_equal(alpha2, country->alpha2)) return regdb_query_country(regdb, country); country++; } return -ENODATA; } static void regdb_fw_cb(const struct firmware *fw, void *context) { int set_error = 0; bool restore = true; void *db; if (!fw) { pr_info("failed to load regulatory.db\n"); set_error = -ENODATA; } else if (!valid_regdb(fw->data, fw->size)) { pr_info("loaded regulatory.db is malformed or signature is missing/invalid\n"); set_error = 
-EINVAL; } rtnl_lock(); if (regdb && !IS_ERR(regdb)) { /* negative case - a bug * positive case - can happen due to race in case of multiple cb's in * queue, due to usage of asynchronous callback * * Either case, just restore and free new db. */ } else if (set_error) { regdb = ERR_PTR(set_error); } else if (fw) { db = kmemdup(fw->data, fw->size, GFP_KERNEL); if (db) { regdb = db; restore = context && query_regdb(context); } else { restore = true; } } if (restore) restore_regulatory_settings(true, false); rtnl_unlock(); kfree(context); release_firmware(fw); } MODULE_FIRMWARE("regulatory.db"); static int query_regdb_file(const char *alpha2) { int err; ASSERT_RTNL(); if (regdb) return query_regdb(alpha2); alpha2 = kmemdup(alpha2, 2, GFP_KERNEL); if (!alpha2) return -ENOMEM; err = request_firmware_nowait(THIS_MODULE, true, "regulatory.db", &reg_pdev->dev, GFP_KERNEL, (void *)alpha2, regdb_fw_cb); if (err) kfree(alpha2); return err; } int reg_reload_regdb(void) { const struct firmware *fw; void *db; int err; const struct ieee80211_regdomain *current_regdomain; struct regulatory_request *request; err = request_firmware(&fw, "regulatory.db", &reg_pdev->dev); if (err) return err; if (!valid_regdb(fw->data, fw->size)) { err = -ENODATA; goto out; } db = kmemdup(fw->data, fw->size, GFP_KERNEL); if (!db) { err = -ENOMEM; goto out; } rtnl_lock(); if (!IS_ERR_OR_NULL(regdb)) kfree(regdb); regdb = db; /* reset regulatory domain */ current_regdomain = get_cfg80211_regdom(); request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) { err = -ENOMEM; goto out_unlock; } request->wiphy_idx = WIPHY_IDX_INVALID; request->alpha2[0] = current_regdomain->alpha2[0]; request->alpha2[1] = current_regdomain->alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_CORE; request->user_reg_hint_type = NL80211_USER_REG_HINT_USER; reg_process_hint(request); out_unlock: rtnl_unlock(); out: release_firmware(fw); return err; } static bool reg_query_database(struct regulatory_request *request) { if (query_regdb_file(request->alpha2) == 0) return true; if (call_crda(request->alpha2) == 0) return true; return false; } bool reg_is_valid_request(const char *alpha2) { struct regulatory_request *lr = get_last_request(); if (!lr || lr->processed) return false; return alpha2_equal(lr->alpha2, alpha2); } static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy) { struct regulatory_request *lr = get_last_request(); /* * Follow the driver's regulatory domain, if present, unless a country * IE has been processed or a user wants to help compliance further */ if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && lr->initiator != NL80211_REGDOM_SET_BY_USER && wiphy->regd) return get_wiphy_regdom(wiphy); return get_cfg80211_regdom(); } static unsigned int reg_get_max_bandwidth_from_range(const struct ieee80211_regdomain *rd, const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; const struct ieee80211_freq_range *freq_range_tmp; const struct ieee80211_reg_rule *tmp; u32 start_freq, end_freq, idx, no; for (idx = 0; idx < rd->n_reg_rules; idx++) if (rule == &rd->reg_rules[idx]) break; if (idx == rd->n_reg_rules) return 0; /* get start_freq */ no = idx; while (no) { tmp = &rd->reg_rules[--no]; freq_range_tmp = &tmp->freq_range; if (freq_range_tmp->end_freq_khz < freq_range->start_freq_khz) break; freq_range = freq_range_tmp; } start_freq = freq_range->start_freq_khz; /* get end_freq */ freq_range = &rule->freq_range; no = idx; while (no < rd->n_reg_rules - 1) { tmp = 
&rd->reg_rules[++no]; freq_range_tmp = &tmp->freq_range; if (freq_range_tmp->start_freq_khz > freq_range->end_freq_khz) break; freq_range = freq_range_tmp; } end_freq = freq_range->end_freq_khz; return end_freq - start_freq; } unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd, const struct ieee80211_reg_rule *rule) { unsigned int bw = reg_get_max_bandwidth_from_range(rd, rule); if (rule->flags & NL80211_RRF_NO_320MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(160)); if (rule->flags & NL80211_RRF_NO_160MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(80)); if (rule->flags & NL80211_RRF_NO_80MHZ) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(40)); /* * HT40+/HT40- limits are handled per-channel. Only limit BW if both * are not allowed. */ if (rule->flags & NL80211_RRF_NO_HT40MINUS && rule->flags & NL80211_RRF_NO_HT40PLUS) bw = min_t(unsigned int, bw, MHZ_TO_KHZ(20)); return bw; } /* Sanity check on a regulatory rule */ static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule) { const struct ieee80211_freq_range *freq_range = &rule->freq_range; u32 freq_diff; if (freq_range->start_freq_khz <= 0 || freq_range->end_freq_khz <= 0) return false; if (freq_range->start_freq_khz > freq_range->end_freq_khz) return false; freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->end_freq_khz <= freq_range->start_freq_khz || freq_range->max_bandwidth_khz > freq_diff) return false; return true; } static bool is_valid_rd(const struct ieee80211_regdomain *rd) { const struct ieee80211_reg_rule *reg_rule = NULL; unsigned int i; if (!rd->n_reg_rules) return false; if (WARN_ON(rd->n_reg_rules > NL80211_MAX_SUPP_REG_RULES)) return false; for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; if (!is_valid_reg_rule(reg_rule)) return false; } return true; } /** * freq_in_rule_band - tells us if a frequency is in a frequency band * @freq_range: frequency rule we want to query * @freq_khz: frequency we are inquiring about * * This lets us know if a specific frequency rule is or is not relevant to * a specific frequency's band. Bands are device specific and artificial * definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"), * however it is safe for now to assume that a frequency rule should not be * part of a frequency's band if the start freq or end freq are off by more * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the * 60 GHz band. * This resolution can be lowered and should be considered as we add * regulatory rule support for other "bands". * * Returns: whether or not the frequency is in the range */ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range, u32 freq_khz) { /* * From 802.11ad: directional multi-gigabit (DMG): * Pertaining to operation in a frequency band containing a channel * with the Channel starting frequency above 45 GHz. */ u32 limit = freq_khz > 45 * KHZ_PER_GHZ ? 20 * KHZ_PER_GHZ : 2 * KHZ_PER_GHZ; if (abs(freq_khz - freq_range->start_freq_khz) <= limit) return true; if (abs(freq_khz - freq_range->end_freq_khz) <= limit) return true; return false; } /* * Later on we can perhaps use the more restrictive DFS * region but we don't have information for that yet so * for now simply disallow conflicts. 
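 *
 * For example, intersecting an FCC domain with an ETSI domain leaves the
 * combined domain's DFS region as NL80211_DFS_UNSET.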
*/ static enum nl80211_dfs_regions reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1, const enum nl80211_dfs_regions dfs_region2) { if (dfs_region1 != dfs_region2) return NL80211_DFS_UNSET; return dfs_region1; } static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1, const struct ieee80211_wmm_ac *wmm_ac2, struct ieee80211_wmm_ac *intersect) { intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min); intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max); intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot); intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn); } /* * Helper for regdom_intersect(), this does the real * mathematical intersection fun */ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1, const struct ieee80211_regdomain *rd2, const struct ieee80211_reg_rule *rule1, const struct ieee80211_reg_rule *rule2, struct ieee80211_reg_rule *intersected_rule) { const struct ieee80211_freq_range *freq_range1, *freq_range2; struct ieee80211_freq_range *freq_range; const struct ieee80211_power_rule *power_rule1, *power_rule2; struct ieee80211_power_rule *power_rule; const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2; struct ieee80211_wmm_rule *wmm_rule; u32 freq_diff, max_bandwidth1, max_bandwidth2; freq_range1 = &rule1->freq_range; freq_range2 = &rule2->freq_range; freq_range = &intersected_rule->freq_range; power_rule1 = &rule1->power_rule; power_rule2 = &rule2->power_rule; power_rule = &intersected_rule->power_rule; wmm_rule1 = &rule1->wmm_rule; wmm_rule2 = &rule2->wmm_rule; wmm_rule = &intersected_rule->wmm_rule; freq_range->start_freq_khz = max(freq_range1->start_freq_khz, freq_range2->start_freq_khz); freq_range->end_freq_khz = min(freq_range1->end_freq_khz, freq_range2->end_freq_khz); max_bandwidth1 = freq_range1->max_bandwidth_khz; max_bandwidth2 = freq_range2->max_bandwidth_khz; if (rule1->flags & NL80211_RRF_AUTO_BW) max_bandwidth1 = reg_get_max_bandwidth(rd1, rule1); if (rule2->flags & NL80211_RRF_AUTO_BW) max_bandwidth2 = reg_get_max_bandwidth(rd2, rule2); freq_range->max_bandwidth_khz = min(max_bandwidth1, max_bandwidth2); intersected_rule->flags = rule1->flags | rule2->flags; /* * In case NL80211_RRF_AUTO_BW requested for both rules * set AUTO_BW in intersected rule also. Next we will * calculate BW correctly in handle_channel function. * In other case remove AUTO_BW flag while we calculate * maximum bandwidth correctly and auto calculation is * not required. 
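 *
 * Sketch of the effect (assumed numbers): intersecting a 2400-2480 MHz
 * rule allowing up to 40 MHz with AUTO_BW against a 2400-2480 MHz rule
 * fixed at 20 MHz without it yields a rule capped at 20 MHz with
 * AUTO_BW cleared.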
*/ if ((rule1->flags & NL80211_RRF_AUTO_BW) && (rule2->flags & NL80211_RRF_AUTO_BW)) intersected_rule->flags |= NL80211_RRF_AUTO_BW; else intersected_rule->flags &= ~NL80211_RRF_AUTO_BW; freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz; if (freq_range->max_bandwidth_khz > freq_diff) freq_range->max_bandwidth_khz = freq_diff; power_rule->max_eirp = min(power_rule1->max_eirp, power_rule2->max_eirp); power_rule->max_antenna_gain = min(power_rule1->max_antenna_gain, power_rule2->max_antenna_gain); intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms, rule2->dfs_cac_ms); if (rule1->has_wmm && rule2->has_wmm) { u8 ac; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { reg_wmm_rules_intersect(&wmm_rule1->client[ac], &wmm_rule2->client[ac], &wmm_rule->client[ac]); reg_wmm_rules_intersect(&wmm_rule1->ap[ac], &wmm_rule2->ap[ac], &wmm_rule->ap[ac]); } intersected_rule->has_wmm = true; } else if (rule1->has_wmm) { *wmm_rule = *wmm_rule1; intersected_rule->has_wmm = true; } else if (rule2->has_wmm) { *wmm_rule = *wmm_rule2; intersected_rule->has_wmm = true; } else { intersected_rule->has_wmm = false; } if (!is_valid_reg_rule(intersected_rule)) return -EINVAL; return 0; } /* check whether old rule contains new rule */ static bool rule_contains(struct ieee80211_reg_rule *r1, struct ieee80211_reg_rule *r2) { /* for simplicity, currently consider only same flags */ if (r1->flags != r2->flags) return false; /* verify r1 is more restrictive */ if ((r1->power_rule.max_antenna_gain > r2->power_rule.max_antenna_gain) || r1->power_rule.max_eirp > r2->power_rule.max_eirp) return false; /* make sure r2's range is contained within r1 */ if (r1->freq_range.start_freq_khz > r2->freq_range.start_freq_khz || r1->freq_range.end_freq_khz < r2->freq_range.end_freq_khz) return false; /* and finally verify that r1.max_bw >= r2.max_bw */ if (r1->freq_range.max_bandwidth_khz < r2->freq_range.max_bandwidth_khz) return false; return true; } /* add or extend current rules. do nothing if rule is already contained */ static void add_rule(struct ieee80211_reg_rule *rule, struct ieee80211_reg_rule *reg_rules, u32 *n_rules) { struct ieee80211_reg_rule *tmp_rule; int i; for (i = 0; i < *n_rules; i++) { tmp_rule = &reg_rules[i]; /* rule is already contained - do nothing */ if (rule_contains(tmp_rule, rule)) return; /* extend rule if possible */ if (rule_contains(rule, tmp_rule)) { memcpy(tmp_rule, rule, sizeof(*rule)); return; } } memcpy(&reg_rules[*n_rules], rule, sizeof(*rule)); (*n_rules)++; } /** * regdom_intersect - do the intersection between two regulatory domains * @rd1: first regulatory domain * @rd2: second regulatory domain * * Use this function to get the intersection between two regulatory domains. * Once completed we will mark the alpha2 for the rd as intersected, "98", * as no one single alpha2 can represent this regulatory domain. * * Returns a pointer to the regulatory domain structure which will hold the * resulting intersection of rules between rd1 and rd2. We will * kzalloc() this structure for you. * * Returns: the intersected regdomain */ static struct ieee80211_regdomain * regdom_intersect(const struct ieee80211_regdomain *rd1, const struct ieee80211_regdomain *rd2) { int r; unsigned int x, y; unsigned int num_rules = 0; const struct ieee80211_reg_rule *rule1, *rule2; struct ieee80211_reg_rule intersected_rule; struct ieee80211_regdomain *rd; if (!rd1 || !rd2) return NULL; /* * First we get a count of the rules we'll need, then we actually * build them. 
This is to so we can malloc() and free() a * regdomain once. The reason we use reg_rules_intersect() here * is it will return -EINVAL if the rule computed makes no sense. * All rules that do check out OK are valid. */ for (x = 0; x < rd1->n_reg_rules; x++) { rule1 = &rd1->reg_rules[x]; for (y = 0; y < rd2->n_reg_rules; y++) { rule2 = &rd2->reg_rules[y]; if (!reg_rules_intersect(rd1, rd2, rule1, rule2, &intersected_rule)) num_rules++; } } if (!num_rules) return NULL; rd = kzalloc(struct_size(rd, reg_rules, num_rules), GFP_KERNEL); if (!rd) return NULL; for (x = 0; x < rd1->n_reg_rules; x++) { rule1 = &rd1->reg_rules[x]; for (y = 0; y < rd2->n_reg_rules; y++) { rule2 = &rd2->reg_rules[y]; r = reg_rules_intersect(rd1, rd2, rule1, rule2, &intersected_rule); /* * No need to memset here the intersected rule here as * we're not using the stack anymore */ if (r) continue; add_rule(&intersected_rule, rd->reg_rules, &rd->n_reg_rules); } } rd->alpha2[0] = '9'; rd->alpha2[1] = '8'; rd->dfs_region = reg_intersect_dfs_region(rd1->dfs_region, rd2->dfs_region); return rd; } /* * XXX: add support for the rest of enum nl80211_reg_rule_flags, we may * want to just have the channel structure use these */ static u32 map_regdom_flags(u32 rd_flags) { u32 channel_flags = 0; if (rd_flags & NL80211_RRF_NO_IR_ALL) channel_flags |= IEEE80211_CHAN_NO_IR; if (rd_flags & NL80211_RRF_DFS) channel_flags |= IEEE80211_CHAN_RADAR; if (rd_flags & NL80211_RRF_NO_OFDM) channel_flags |= IEEE80211_CHAN_NO_OFDM; if (rd_flags & NL80211_RRF_NO_OUTDOOR) channel_flags |= IEEE80211_CHAN_INDOOR_ONLY; if (rd_flags & NL80211_RRF_IR_CONCURRENT) channel_flags |= IEEE80211_CHAN_IR_CONCURRENT; if (rd_flags & NL80211_RRF_NO_HT40MINUS) channel_flags |= IEEE80211_CHAN_NO_HT40MINUS; if (rd_flags & NL80211_RRF_NO_HT40PLUS) channel_flags |= IEEE80211_CHAN_NO_HT40PLUS; if (rd_flags & NL80211_RRF_NO_80MHZ) channel_flags |= IEEE80211_CHAN_NO_80MHZ; if (rd_flags & NL80211_RRF_NO_160MHZ) channel_flags |= IEEE80211_CHAN_NO_160MHZ; if (rd_flags & NL80211_RRF_NO_HE) channel_flags |= IEEE80211_CHAN_NO_HE; if (rd_flags & NL80211_RRF_NO_320MHZ) channel_flags |= IEEE80211_CHAN_NO_320MHZ; if (rd_flags & NL80211_RRF_NO_EHT) channel_flags |= IEEE80211_CHAN_NO_EHT; if (rd_flags & NL80211_RRF_DFS_CONCURRENT) channel_flags |= IEEE80211_CHAN_DFS_CONCURRENT; if (rd_flags & NL80211_RRF_NO_6GHZ_VLP_CLIENT) channel_flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT; if (rd_flags & NL80211_RRF_NO_6GHZ_AFC_CLIENT) channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT; if (rd_flags & NL80211_RRF_PSD) channel_flags |= IEEE80211_CHAN_PSD; if (rd_flags & NL80211_RRF_ALLOW_6GHZ_VLP_AP) channel_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; return channel_flags; } static const struct ieee80211_reg_rule * freq_reg_info_regd(u32 center_freq, const struct ieee80211_regdomain *regd, u32 bw) { int i; bool band_rule_found = false; bool bw_fits = false; if (!regd) return ERR_PTR(-EINVAL); for (i = 0; i < regd->n_reg_rules; i++) { const struct ieee80211_reg_rule *rr; const struct ieee80211_freq_range *fr = NULL; rr = &regd->reg_rules[i]; fr = &rr->freq_range; /* * We only need to know if one frequency rule was * in center_freq's band, that's enough, so let's * not overwrite it once found */ if (!band_rule_found) band_rule_found = freq_in_rule_band(fr, center_freq); bw_fits = cfg80211_does_bw_fit_range(fr, center_freq, bw); if (band_rule_found && bw_fits) return rr; } if (!band_rule_found) return ERR_PTR(-ERANGE); return ERR_PTR(-EINVAL); } static const struct ieee80211_reg_rule * 
__freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw) { const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy); static const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20}; const struct ieee80211_reg_rule *reg_rule = ERR_PTR(-ERANGE); int i = ARRAY_SIZE(bws) - 1; u32 bw; for (bw = MHZ_TO_KHZ(bws[i]); bw >= min_bw; bw = MHZ_TO_KHZ(bws[i--])) { reg_rule = freq_reg_info_regd(center_freq, regd, bw); if (!IS_ERR(reg_rule)) return reg_rule; } return reg_rule; } const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy, u32 center_freq) { u32 min_bw = center_freq < MHZ_TO_KHZ(1000) ? 1 : 20; return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(min_bw)); } EXPORT_SYMBOL(freq_reg_info); const char *reg_initiator_name(enum nl80211_reg_initiator initiator) { switch (initiator) { case NL80211_REGDOM_SET_BY_CORE: return "core"; case NL80211_REGDOM_SET_BY_USER: return "user"; case NL80211_REGDOM_SET_BY_DRIVER: return "driver"; case NL80211_REGDOM_SET_BY_COUNTRY_IE: return "country element"; default: WARN_ON(1); return "bug"; } } EXPORT_SYMBOL(reg_initiator_name); static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd, const struct ieee80211_reg_rule *reg_rule, const struct ieee80211_channel *chan) { const struct ieee80211_freq_range *freq_range = NULL; u32 max_bandwidth_khz, center_freq_khz, bw_flags = 0; bool is_s1g = chan->band == NL80211_BAND_S1GHZ; freq_range = &reg_rule->freq_range; max_bandwidth_khz = freq_range->max_bandwidth_khz; center_freq_khz = ieee80211_channel_to_khz(chan); /* Check if auto calculation requested */ if (reg_rule->flags & NL80211_RRF_AUTO_BW) max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); /* If we get a reg_rule we can assume that at least 5Mhz fit */ if (!cfg80211_does_bw_fit_range(freq_range, center_freq_khz, MHZ_TO_KHZ(10))) bw_flags |= IEEE80211_CHAN_NO_10MHZ; if (!cfg80211_does_bw_fit_range(freq_range, center_freq_khz, MHZ_TO_KHZ(20))) bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (is_s1g) { /* S1G is strict about non overlapping channels. We can * calculate which bandwidth is allowed per channel by finding * the largest bandwidth which cleanly divides the freq_range. */ int edge_offset; int ch_bw = max_bandwidth_khz; while (ch_bw) { edge_offset = (center_freq_khz - ch_bw / 2) - freq_range->start_freq_khz; if (edge_offset % ch_bw == 0) { switch (KHZ_TO_MHZ(ch_bw)) { case 1: bw_flags |= IEEE80211_CHAN_1MHZ; break; case 2: bw_flags |= IEEE80211_CHAN_2MHZ; break; case 4: bw_flags |= IEEE80211_CHAN_4MHZ; break; case 8: bw_flags |= IEEE80211_CHAN_8MHZ; break; case 16: bw_flags |= IEEE80211_CHAN_16MHZ; break; default: /* If we got here, no bandwidths fit on * this frequency, ie. band edge. 
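 *
 * Worked example for the loop above (assumed numbers): with a regulatory
 * rule starting exactly at the channel's lower edge and a max_bandwidth
 * of 2 MHz, a channel centred 1 MHz above the rule start gives
 * edge_offset == 0 for ch_bw == 2 MHz, so IEEE80211_CHAN_2MHZ is set and
 * the loop stops.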
*/ bw_flags |= IEEE80211_CHAN_DISABLED; break; } break; } ch_bw /= 2; } } else { if (max_bandwidth_khz < MHZ_TO_KHZ(10)) bw_flags |= IEEE80211_CHAN_NO_10MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(20)) bw_flags |= IEEE80211_CHAN_NO_20MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(40)) bw_flags |= IEEE80211_CHAN_NO_HT40; if (max_bandwidth_khz < MHZ_TO_KHZ(80)) bw_flags |= IEEE80211_CHAN_NO_80MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(160)) bw_flags |= IEEE80211_CHAN_NO_160MHZ; if (max_bandwidth_khz < MHZ_TO_KHZ(320)) bw_flags |= IEEE80211_CHAN_NO_320MHZ; } return bw_flags; } static void handle_channel_single_rule(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *chan, u32 flags, struct regulatory_request *lr, struct wiphy *request_wiphy, const struct ieee80211_reg_rule *reg_rule) { u32 bw_flags = 0; const struct ieee80211_power_rule *power_rule = NULL; const struct ieee80211_regdomain *regd; regd = reg_get_regdomain(wiphy); power_rule = &reg_rule->power_rule; bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan); if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { /* * This guarantees the driver's requested regulatory domain * will always be used as a base for further regulatory * settings */ chan->flags = chan->orig_flags = map_regdom_flags(reg_rule->flags) | bw_flags; chan->max_antenna_gain = chan->orig_mag = (int) MBI_TO_DBI(power_rule->max_antenna_gain); chan->max_reg_power = chan->max_power = chan->orig_mpwr = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; } if (chan->flags & IEEE80211_CHAN_PSD) chan->psd = reg_rule->psd; return; } chan->dfs_state = NL80211_DFS_USABLE; chan->dfs_state_entered = jiffies; chan->beacon_found = false; chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags); chan->max_antenna_gain = min_t(int, chan->orig_mag, MBI_TO_DBI(power_rule->max_antenna_gain)); chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; else chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; } if (chan->flags & IEEE80211_CHAN_PSD) chan->psd = reg_rule->psd; if (chan->orig_mpwr) { /* * Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER * will always follow the passed country IE power settings. 
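 *
 * Otherwise, as the code below shows, max_power is clamped to
 * min(orig_mpwr, max_reg_power), so a regulatory hint can lower but
 * never raise the device's original power limit.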
*/ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER) chan->max_power = chan->max_reg_power; else chan->max_power = min(chan->orig_mpwr, chan->max_reg_power); } else chan->max_power = chan->max_reg_power; } static void handle_channel_adjacent_rules(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *chan, u32 flags, struct regulatory_request *lr, struct wiphy *request_wiphy, const struct ieee80211_reg_rule *rrule1, const struct ieee80211_reg_rule *rrule2, struct ieee80211_freq_range *comb_range) { u32 bw_flags1 = 0; u32 bw_flags2 = 0; const struct ieee80211_power_rule *power_rule1 = NULL; const struct ieee80211_power_rule *power_rule2 = NULL; const struct ieee80211_regdomain *regd; regd = reg_get_regdomain(wiphy); power_rule1 = &rrule1->power_rule; power_rule2 = &rrule2->power_rule; bw_flags1 = reg_rule_to_chan_bw_flags(regd, rrule1, chan); bw_flags2 = reg_rule_to_chan_bw_flags(regd, rrule2, chan); if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { /* This guarantees the driver's requested regulatory domain * will always be used as a base for further regulatory * settings */ chan->flags = map_regdom_flags(rrule1->flags) | map_regdom_flags(rrule2->flags) | bw_flags1 | bw_flags2; chan->orig_flags = chan->flags; chan->max_antenna_gain = min_t(int, MBI_TO_DBI(power_rule1->max_antenna_gain), MBI_TO_DBI(power_rule2->max_antenna_gain)); chan->orig_mag = chan->max_antenna_gain; chan->max_reg_power = min_t(int, MBM_TO_DBM(power_rule1->max_eirp), MBM_TO_DBM(power_rule2->max_eirp)); chan->max_power = chan->max_reg_power; chan->orig_mpwr = chan->max_reg_power; if (chan->flags & IEEE80211_CHAN_RADAR) { chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms) chan->dfs_cac_ms = max_t(unsigned int, rrule1->dfs_cac_ms, rrule2->dfs_cac_ms); } if ((rrule1->flags & NL80211_RRF_PSD) && (rrule2->flags & NL80211_RRF_PSD)) chan->psd = min_t(s8, rrule1->psd, rrule2->psd); else chan->flags &= ~NL80211_RRF_PSD; return; } chan->dfs_state = NL80211_DFS_USABLE; chan->dfs_state_entered = jiffies; chan->beacon_found = false; chan->flags = flags | bw_flags1 | bw_flags2 | map_regdom_flags(rrule1->flags) | map_regdom_flags(rrule2->flags); /* reg_rule_to_chan_bw_flags may forbids 10 and forbids 20 MHz * (otherwise no adj. rule case), recheck therefore */ if (cfg80211_does_bw_fit_range(comb_range, ieee80211_channel_to_khz(chan), MHZ_TO_KHZ(10))) chan->flags &= ~IEEE80211_CHAN_NO_10MHZ; if (cfg80211_does_bw_fit_range(comb_range, ieee80211_channel_to_khz(chan), MHZ_TO_KHZ(20))) chan->flags &= ~IEEE80211_CHAN_NO_20MHZ; chan->max_antenna_gain = min_t(int, chan->orig_mag, min_t(int, MBI_TO_DBI(power_rule1->max_antenna_gain), MBI_TO_DBI(power_rule2->max_antenna_gain))); chan->max_reg_power = min_t(int, MBM_TO_DBM(power_rule1->max_eirp), MBM_TO_DBM(power_rule2->max_eirp)); if (chan->flags & IEEE80211_CHAN_RADAR) { if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms) chan->dfs_cac_ms = max_t(unsigned int, rrule1->dfs_cac_ms, rrule2->dfs_cac_ms); else chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; } if (chan->orig_mpwr) { /* Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER * will always follow the passed country IE power settings. 
*/ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER) chan->max_power = chan->max_reg_power; else chan->max_power = min(chan->orig_mpwr, chan->max_reg_power); } else { chan->max_power = chan->max_reg_power; } } /* Note that right now we assume the desired channel bandwidth * is always 20 MHz for each individual channel (HT40 uses 20 MHz * per channel, the primary and the extension channel). */ static void handle_channel(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *chan) { const u32 orig_chan_freq = ieee80211_channel_to_khz(chan); struct regulatory_request *lr = get_last_request(); struct wiphy *request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); const struct ieee80211_reg_rule *rrule = NULL; const struct ieee80211_reg_rule *rrule1 = NULL; const struct ieee80211_reg_rule *rrule2 = NULL; u32 flags = chan->orig_flags; rrule = freq_reg_info(wiphy, orig_chan_freq); if (IS_ERR(rrule)) { /* check for adjacent match, therefore get rules for * chan - 20 MHz and chan + 20 MHz and test * if reg rules are adjacent */ rrule1 = freq_reg_info(wiphy, orig_chan_freq - MHZ_TO_KHZ(20)); rrule2 = freq_reg_info(wiphy, orig_chan_freq + MHZ_TO_KHZ(20)); if (!IS_ERR(rrule1) && !IS_ERR(rrule2)) { struct ieee80211_freq_range comb_range; if (rrule1->freq_range.end_freq_khz != rrule2->freq_range.start_freq_khz) goto disable_chan; comb_range.start_freq_khz = rrule1->freq_range.start_freq_khz; comb_range.end_freq_khz = rrule2->freq_range.end_freq_khz; comb_range.max_bandwidth_khz = min_t(u32, rrule1->freq_range.max_bandwidth_khz, rrule2->freq_range.max_bandwidth_khz); if (!cfg80211_does_bw_fit_range(&comb_range, orig_chan_freq, MHZ_TO_KHZ(20))) goto disable_chan; handle_channel_adjacent_rules(wiphy, initiator, chan, flags, lr, request_wiphy, rrule1, rrule2, &comb_range); return; } disable_chan: /* We will disable all channels that do not match our * received regulatory rule unless the hint is coming * from a Country IE and the Country IE had no information * about a band. The IEEE 802.11 spec allows for an AP * to send only a subset of the regulatory rules allowed, * so an AP in the US that only supports 2.4 GHz may only send * a country IE with information for the 2.4 GHz band * while 5 GHz is still supported. 
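 *
 * In that case freq_reg_info() reports -ERANGE (no rule found in this
 * frequency's band at all), and the check below leaves such channels
 * untouched instead of disabling them.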
*/ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && PTR_ERR(rrule) == -ERANGE) return; if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && request_wiphy && request_wiphy == wiphy && request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) { pr_debug("Disabling freq %d.%03d MHz for good\n", chan->center_freq, chan->freq_offset); chan->orig_flags |= IEEE80211_CHAN_DISABLED; chan->flags = chan->orig_flags; } else { pr_debug("Disabling freq %d.%03d MHz\n", chan->center_freq, chan->freq_offset); chan->flags |= IEEE80211_CHAN_DISABLED; } return; } handle_channel_single_rule(wiphy, initiator, chan, flags, lr, request_wiphy, rrule); } static void handle_band(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_supported_band *sband) { unsigned int i; if (!sband) return; for (i = 0; i < sband->n_channels; i++) handle_channel(wiphy, initiator, &sband->channels[i]); } static bool reg_request_cell_base(struct regulatory_request *request) { if (request->initiator != NL80211_REGDOM_SET_BY_USER) return false; return request->user_reg_hint_type == NL80211_USER_REG_HINT_CELL_BASE; } bool reg_last_request_cell_base(void) { return reg_request_cell_base(get_last_request()); } #ifdef CONFIG_CFG80211_REG_CELLULAR_HINTS /* Core specific check */ static enum reg_request_treatment reg_ignore_cell_hint(struct regulatory_request *pending_request) { struct regulatory_request *lr = get_last_request(); if (!reg_num_devs_support_basehint) return REG_REQ_IGNORE; if (reg_request_cell_base(lr) && !regdom_changes(pending_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_OK; } /* Device specific check */ static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy) { return !(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS); } #else static enum reg_request_treatment reg_ignore_cell_hint(struct regulatory_request *pending_request) { return REG_REQ_IGNORE; } static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy) { return true; } #endif static bool wiphy_strict_alpha2_regd(struct wiphy *wiphy) { if (wiphy->regulatory_flags & REGULATORY_STRICT_REG && !(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)) return true; return false; } static bool ignore_reg_update(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { struct regulatory_request *lr = get_last_request(); if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) return true; if (!lr) { pr_debug("Ignoring regulatory request set by %s since last_request is not set\n", reg_initiator_name(initiator)); return true; } if (initiator == NL80211_REGDOM_SET_BY_CORE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) { pr_debug("Ignoring regulatory request set by %s since the driver uses its own custom regulatory domain\n", reg_initiator_name(initiator)); return true; } /* * wiphy->regd will be set once the device has its own * desired regulatory domain set */ if (wiphy_strict_alpha2_regd(wiphy) && !wiphy->regd && initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && !is_world_regdom(lr->alpha2)) { pr_debug("Ignoring regulatory request set by %s since the driver requires its own regulatory domain to be set first\n", reg_initiator_name(initiator)); return true; } if (reg_request_cell_base(lr)) return reg_dev_ignore_cell_hint(wiphy); return false; } static bool reg_is_world_roaming(struct wiphy *wiphy) { const struct ieee80211_regdomain *cr = get_cfg80211_regdom(); const struct ieee80211_regdomain *wr = get_wiphy_regdom(wiphy); struct regulatory_request *lr = get_last_request(); if (is_world_regdom(cr->alpha2) || (wr && 
is_world_regdom(wr->alpha2))) return true; if (lr && lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) return true; return false; } static void reg_call_notifier(struct wiphy *wiphy, struct regulatory_request *request) { if (wiphy->reg_notifier) wiphy->reg_notifier(wiphy, request); } static void handle_reg_beacon(struct wiphy *wiphy, unsigned int chan_idx, struct reg_beacon *reg_beacon) { struct ieee80211_supported_band *sband; struct ieee80211_channel *chan; bool channel_changed = false; struct ieee80211_channel chan_before; struct regulatory_request *lr = get_last_request(); sband = wiphy->bands[reg_beacon->chan.band]; chan = &sband->channels[chan_idx]; if (likely(!ieee80211_channel_equal(chan, &reg_beacon->chan))) return; if (chan->beacon_found) return; chan->beacon_found = true; if (!reg_is_world_roaming(wiphy)) return; if (wiphy->regulatory_flags & REGULATORY_DISABLE_BEACON_HINTS) return; chan_before = *chan; if (chan->flags & IEEE80211_CHAN_NO_IR) { chan->flags &= ~IEEE80211_CHAN_NO_IR; channel_changed = true; } if (channel_changed) { nl80211_send_beacon_hint_event(wiphy, &chan_before, chan); if (wiphy->flags & WIPHY_FLAG_CHANNEL_CHANGE_ON_BEACON) reg_call_notifier(wiphy, lr); } } /* * Called when a scan on a wiphy finds a beacon on * new channel */ static void wiphy_update_new_beacon(struct wiphy *wiphy, struct reg_beacon *reg_beacon) { unsigned int i; struct ieee80211_supported_band *sband; if (!wiphy->bands[reg_beacon->chan.band]) return; sband = wiphy->bands[reg_beacon->chan.band]; for (i = 0; i < sband->n_channels; i++) handle_reg_beacon(wiphy, i, reg_beacon); } /* * Called upon reg changes or a new wiphy is added */ static void wiphy_update_beacon_reg(struct wiphy *wiphy) { unsigned int i; struct ieee80211_supported_band *sband; struct reg_beacon *reg_beacon; list_for_each_entry(reg_beacon, &reg_beacon_list, list) { if (!wiphy->bands[reg_beacon->chan.band]) continue; sband = wiphy->bands[reg_beacon->chan.band]; for (i = 0; i < sband->n_channels; i++) handle_reg_beacon(wiphy, i, reg_beacon); } } /* Reap the advantages of previously found beacons */ static void reg_process_beacons(struct wiphy *wiphy) { /* * Means we are just firing up cfg80211, so no beacons would * have been processed yet. 
*/ if (!last_request) return; wiphy_update_beacon_reg(wiphy); } static bool is_ht40_allowed(struct ieee80211_channel *chan) { if (!chan) return false; if (chan->flags & IEEE80211_CHAN_DISABLED) return false; /* This would happen when regulatory rules disallow HT40 completely */ if ((chan->flags & IEEE80211_CHAN_NO_HT40) == IEEE80211_CHAN_NO_HT40) return false; return true; } static void reg_process_ht_flags_channel(struct wiphy *wiphy, struct ieee80211_channel *channel) { struct ieee80211_supported_band *sband = wiphy->bands[channel->band]; struct ieee80211_channel *channel_before = NULL, *channel_after = NULL; const struct ieee80211_regdomain *regd; unsigned int i; u32 flags; if (!is_ht40_allowed(channel)) { channel->flags |= IEEE80211_CHAN_NO_HT40; return; } /* * We need to ensure the extension channels exist to * be able to use HT40- or HT40+, this finds them (or not) */ for (i = 0; i < sband->n_channels; i++) { struct ieee80211_channel *c = &sband->channels[i]; if (c->center_freq == (channel->center_freq - 20)) channel_before = c; if (c->center_freq == (channel->center_freq + 20)) channel_after = c; } flags = 0; regd = get_wiphy_regdom(wiphy); if (regd) { const struct ieee80211_reg_rule *reg_rule = freq_reg_info_regd(MHZ_TO_KHZ(channel->center_freq), regd, MHZ_TO_KHZ(20)); if (!IS_ERR(reg_rule)) flags = reg_rule->flags; } /* * Please note that this assumes target bandwidth is 20 MHz, * if that ever changes we also need to change the below logic * to include that as well. */ if (!is_ht40_allowed(channel_before) || flags & NL80211_RRF_NO_HT40MINUS) channel->flags |= IEEE80211_CHAN_NO_HT40MINUS; else channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; if (!is_ht40_allowed(channel_after) || flags & NL80211_RRF_NO_HT40PLUS) channel->flags |= IEEE80211_CHAN_NO_HT40PLUS; else channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; } static void reg_process_ht_flags_band(struct wiphy *wiphy, struct ieee80211_supported_band *sband) { unsigned int i; if (!sband) return; for (i = 0; i < sband->n_channels; i++) reg_process_ht_flags_channel(wiphy, &sband->channels[i]); } static void reg_process_ht_flags(struct wiphy *wiphy) { enum nl80211_band band; if (!wiphy) return; for (band = 0; band < NUM_NL80211_BANDS; band++) reg_process_ht_flags_band(wiphy, wiphy->bands[band]); } static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) { struct cfg80211_chan_def chandef = {}; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); enum nl80211_iftype iftype; bool ret; int link; iftype = wdev->iftype; /* make sure the interface is active */ if (!wdev->netdev || !netif_running(wdev->netdev)) return true; for (link = 0; link < ARRAY_SIZE(wdev->links); link++) { struct ieee80211_channel *chan; if (!wdev->valid_links && link > 0) break; if (wdev->valid_links && !(wdev->valid_links & BIT(link))) continue; switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (!wdev->links[link].ap.beacon_interval) continue; chandef = wdev->links[link].ap.chandef; break; case NL80211_IFTYPE_MESH_POINT: if (!wdev->u.mesh.beacon_interval) continue; chandef = wdev->u.mesh.chandef; break; case NL80211_IFTYPE_ADHOC: if (!wdev->u.ibss.ssid_len) continue; chandef = wdev->u.ibss.chandef; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: /* Maybe we could consider disabling that link only? 
*/ if (!wdev->links[link].client.current_bss) continue; chan = wdev->links[link].client.current_bss->pub.channel; if (!chan) continue; if (!rdev->ops->get_channel || rdev_get_channel(rdev, wdev, link, &chandef)) cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT); break; case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_DEVICE: /* no enforcement required */ break; case NL80211_IFTYPE_OCB: if (!wdev->u.ocb.chandef.chan) continue; chandef = wdev->u.ocb.chandef; break; case NL80211_IFTYPE_NAN: /* we have no info, but NAN is also pretty universal */ continue; default: /* others not implemented for now */ WARN_ON_ONCE(1); break; } switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: ret = cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype); if (!ret) return ret; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: ret = cfg80211_chandef_usable(wiphy, &chandef, IEEE80211_CHAN_DISABLED); if (!ret) return ret; break; default: break; } } return true; } static void reg_leave_invalid_chans(struct wiphy *wiphy) { struct wireless_dev *wdev; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); wiphy_lock(wiphy); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) if (!reg_wdev_chan_valid(wiphy, wdev)) cfg80211_leave(rdev, wdev); wiphy_unlock(wiphy); } static void reg_check_chans_work(struct work_struct *work) { struct cfg80211_registered_device *rdev; pr_debug("Verifying active interfaces after reg change\n"); rtnl_lock(); for_each_rdev(rdev) reg_leave_invalid_chans(&rdev->wiphy); rtnl_unlock(); } void reg_check_channels(void) { /* * Give usermode a chance to do something nicer (move to another * channel, orderly disconnection), before forcing a disconnection. */ mod_delayed_work(system_power_efficient_wq, &reg_check_chans, msecs_to_jiffies(REG_ENFORCE_GRACE_MS)); } static void wiphy_update_regulatory(struct wiphy *wiphy, enum nl80211_reg_initiator initiator) { enum nl80211_band band; struct regulatory_request *lr = get_last_request(); if (ignore_reg_update(wiphy, initiator)) { /* * Regulatory updates set by CORE are ignored for custom * regulatory cards. Let us notify the changes to the driver, * as some drivers used this to restore its orig_* reg domain. 
*/ if (initiator == NL80211_REGDOM_SET_BY_CORE && wiphy->regulatory_flags & REGULATORY_CUSTOM_REG && !(wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)) reg_call_notifier(wiphy, lr); return; } lr->dfs_region = get_cfg80211_regdom()->dfs_region; for (band = 0; band < NUM_NL80211_BANDS; band++) handle_band(wiphy, initiator, wiphy->bands[band]); reg_process_beacons(wiphy); reg_process_ht_flags(wiphy); reg_call_notifier(wiphy, lr); } static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator) { struct cfg80211_registered_device *rdev; struct wiphy *wiphy; ASSERT_RTNL(); for_each_rdev(rdev) { wiphy = &rdev->wiphy; wiphy_update_regulatory(wiphy, initiator); } reg_check_channels(); } static void handle_channel_custom(struct wiphy *wiphy, struct ieee80211_channel *chan, const struct ieee80211_regdomain *regd, u32 min_bw) { u32 bw_flags = 0; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_power_rule *power_rule = NULL; u32 bw, center_freq_khz; center_freq_khz = ieee80211_channel_to_khz(chan); for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) { reg_rule = freq_reg_info_regd(center_freq_khz, regd, bw); if (!IS_ERR(reg_rule)) break; } if (IS_ERR_OR_NULL(reg_rule)) { pr_debug("Disabling freq %d.%03d MHz as custom regd has no rule that fits it\n", chan->center_freq, chan->freq_offset); if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { chan->flags |= IEEE80211_CHAN_DISABLED; } else { chan->orig_flags |= IEEE80211_CHAN_DISABLED; chan->flags = chan->orig_flags; } return; } power_rule = &reg_rule->power_rule; bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan); chan->dfs_state_entered = jiffies; chan->dfs_state = NL80211_DFS_USABLE; chan->beacon_found = false; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) chan->flags = chan->orig_flags | bw_flags | map_regdom_flags(reg_rule->flags); else chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags; chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain); chan->max_reg_power = chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp); if (chan->flags & IEEE80211_CHAN_RADAR) { if (reg_rule->dfs_cac_ms) chan->dfs_cac_ms = reg_rule->dfs_cac_ms; else chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS; } if (chan->flags & IEEE80211_CHAN_PSD) chan->psd = reg_rule->psd; chan->max_power = chan->max_reg_power; } static void handle_band_custom(struct wiphy *wiphy, struct ieee80211_supported_band *sband, const struct ieee80211_regdomain *regd) { unsigned int i; if (!sband) return; /* * We currently assume that you always want at least 20 MHz, * otherwise channel 12 might get enabled if this rule is * compatible to US, which permits 2402 - 2472 MHz. */ for (i = 0; i < sband->n_channels; i++) handle_channel_custom(wiphy, &sband->channels[i], regd, MHZ_TO_KHZ(20)); } /* Used by drivers prior to wiphy registration */ void wiphy_apply_custom_regulatory(struct wiphy *wiphy, const struct ieee80211_regdomain *regd) { const struct ieee80211_regdomain *new_regd, *tmp; enum nl80211_band band; unsigned int bands_set = 0; WARN(!(wiphy->regulatory_flags & REGULATORY_CUSTOM_REG), "wiphy should have REGULATORY_CUSTOM_REG\n"); wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; for (band = 0; band < NUM_NL80211_BANDS; band++) { if (!wiphy->bands[band]) continue; handle_band_custom(wiphy, wiphy->bands[band], regd); bands_set++; } /* * no point in calling this if it won't have any effect * on your device's supported bands. 
*/ WARN_ON(!bands_set); new_regd = reg_copy_regd(regd); if (IS_ERR(new_regd)) return; rtnl_lock(); wiphy_lock(wiphy); tmp = get_wiphy_regdom(wiphy); rcu_assign_pointer(wiphy->regd, new_regd); rcu_free_regdom(tmp); wiphy_unlock(wiphy); rtnl_unlock(); } EXPORT_SYMBOL(wiphy_apply_custom_regulatory); static void reg_set_request_processed(void) { bool need_more_processing = false; struct regulatory_request *lr = get_last_request(); lr->processed = true; spin_lock(&reg_requests_lock); if (!list_empty(&reg_requests_list)) need_more_processing = true; spin_unlock(&reg_requests_lock); cancel_crda_timeout(); if (need_more_processing) schedule_work(&reg_work); } /** * reg_process_hint_core - process core regulatory requests * @core_request: a pending core regulatory request * * The wireless subsystem can use this function to process * a regulatory request issued by the regulatory core. * * Returns: %REG_REQ_OK or %REG_REQ_IGNORE, indicating if the * hint was processed or ignored */ static enum reg_request_treatment reg_process_hint_core(struct regulatory_request *core_request) { if (reg_query_database(core_request)) { core_request->intersect = false; core_request->processed = false; reg_update_last_request(core_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_user(struct regulatory_request *user_request) { struct regulatory_request *lr = get_last_request(); if (reg_request_cell_base(user_request)) return reg_ignore_cell_hint(user_request); if (reg_request_cell_base(lr)) return REG_REQ_IGNORE; if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) return REG_REQ_INTERSECT; /* * If the user knows better the user should set the regdom * to their country before the IE is picked up */ if (lr->initiator == NL80211_REGDOM_SET_BY_USER && lr->intersect) return REG_REQ_IGNORE; /* * Process user requests only after previous user/driver/core * requests have been processed */ if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE || lr->initiator == NL80211_REGDOM_SET_BY_DRIVER || lr->initiator == NL80211_REGDOM_SET_BY_USER) && regdom_changes(lr->alpha2)) return REG_REQ_IGNORE; if (!regdom_changes(user_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_OK; } /** * reg_process_hint_user - process user regulatory requests * @user_request: a pending user regulatory request * * The wireless subsystem can use this function to process * a regulatory request initiated by userspace. 
* * Returns: %REG_REQ_OK or %REG_REQ_IGNORE, indicating if the * hint was processed or ignored */ static enum reg_request_treatment reg_process_hint_user(struct regulatory_request *user_request) { enum reg_request_treatment treatment; treatment = __reg_process_hint_user(user_request); if (treatment == REG_REQ_IGNORE || treatment == REG_REQ_ALREADY_SET) return REG_REQ_IGNORE; user_request->intersect = treatment == REG_REQ_INTERSECT; user_request->processed = false; if (reg_query_database(user_request)) { reg_update_last_request(user_request); user_alpha2[0] = user_request->alpha2[0]; user_alpha2[1] = user_request->alpha2[1]; return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_driver(struct regulatory_request *driver_request) { struct regulatory_request *lr = get_last_request(); if (lr->initiator == NL80211_REGDOM_SET_BY_CORE) { if (regdom_changes(driver_request->alpha2)) return REG_REQ_OK; return REG_REQ_ALREADY_SET; } /* * This would happen if you unplug and plug your card * back in or if you add a new device for which the previously * loaded card also agrees on the regulatory domain. */ if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER && !regdom_changes(driver_request->alpha2)) return REG_REQ_ALREADY_SET; return REG_REQ_INTERSECT; } /** * reg_process_hint_driver - process driver regulatory requests * @wiphy: the wireless device for the regulatory request * @driver_request: a pending driver regulatory request * * The wireless subsystem can use this function to process * a regulatory request issued by an 802.11 driver. * * Returns: one of the different reg request treatment values. */ static enum reg_request_treatment reg_process_hint_driver(struct wiphy *wiphy, struct regulatory_request *driver_request) { const struct ieee80211_regdomain *regd, *tmp; enum reg_request_treatment treatment; treatment = __reg_process_hint_driver(driver_request); switch (treatment) { case REG_REQ_OK: break; case REG_REQ_IGNORE: return REG_REQ_IGNORE; case REG_REQ_INTERSECT: case REG_REQ_ALREADY_SET: regd = reg_copy_regd(get_cfg80211_regdom()); if (IS_ERR(regd)) return REG_REQ_IGNORE; tmp = get_wiphy_regdom(wiphy); ASSERT_RTNL(); wiphy_lock(wiphy); rcu_assign_pointer(wiphy->regd, regd); wiphy_unlock(wiphy); rcu_free_regdom(tmp); } driver_request->intersect = treatment == REG_REQ_INTERSECT; driver_request->processed = false; /* * Since CRDA will not be called in this case as we already * have applied the requested regulatory domain before we just * inform userspace we have processed the request */ if (treatment == REG_REQ_ALREADY_SET) { nl80211_send_reg_change_event(driver_request); reg_update_last_request(driver_request); reg_set_request_processed(); return REG_REQ_ALREADY_SET; } if (reg_query_database(driver_request)) { reg_update_last_request(driver_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } static enum reg_request_treatment __reg_process_hint_country_ie(struct wiphy *wiphy, struct regulatory_request *country_ie_request) { struct wiphy *last_wiphy = NULL; struct regulatory_request *lr = get_last_request(); if (reg_request_cell_base(lr)) { /* Trust a Cell base station over the AP's country IE */ if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_IGNORE; return REG_REQ_ALREADY_SET; } else { if (wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_IGNORE) return REG_REQ_IGNORE; } if (unlikely(!is_an_alpha2(country_ie_request->alpha2))) return -EINVAL; if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) return REG_REQ_OK; last_wiphy = 
wiphy_idx_to_wiphy(lr->wiphy_idx); if (last_wiphy != wiphy) { /* * Two cards with two APs claiming different * Country IE alpha2s. We could * intersect them, but that seems unlikely * to be correct. Reject second one for now. */ if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_IGNORE; return REG_REQ_ALREADY_SET; } if (regdom_changes(country_ie_request->alpha2)) return REG_REQ_OK; return REG_REQ_ALREADY_SET; } /** * reg_process_hint_country_ie - process regulatory requests from country IEs * @wiphy: the wireless device for the regulatory request * @country_ie_request: a regulatory request from a country IE * * The wireless subsystem can use this function to process * a regulatory request issued by a country Information Element. * * Returns: one of the different reg request treatment values. */ static enum reg_request_treatment reg_process_hint_country_ie(struct wiphy *wiphy, struct regulatory_request *country_ie_request) { enum reg_request_treatment treatment; treatment = __reg_process_hint_country_ie(wiphy, country_ie_request); switch (treatment) { case REG_REQ_OK: break; case REG_REQ_IGNORE: return REG_REQ_IGNORE; case REG_REQ_ALREADY_SET: reg_free_request(country_ie_request); return REG_REQ_ALREADY_SET; case REG_REQ_INTERSECT: /* * This doesn't happen yet, not sure we * ever want to support it for this case. */ WARN_ONCE(1, "Unexpected intersection for country elements"); return REG_REQ_IGNORE; } country_ie_request->intersect = false; country_ie_request->processed = false; if (reg_query_database(country_ie_request)) { reg_update_last_request(country_ie_request); return REG_REQ_OK; } return REG_REQ_IGNORE; } bool reg_dfs_domain_same(struct wiphy *wiphy1, struct wiphy *wiphy2) { const struct ieee80211_regdomain *wiphy1_regd = NULL; const struct ieee80211_regdomain *wiphy2_regd = NULL; const struct ieee80211_regdomain *cfg80211_regd = NULL; bool dfs_domain_same; rcu_read_lock(); cfg80211_regd = rcu_dereference(cfg80211_regdomain); wiphy1_regd = rcu_dereference(wiphy1->regd); if (!wiphy1_regd) wiphy1_regd = cfg80211_regd; wiphy2_regd = rcu_dereference(wiphy2->regd); if (!wiphy2_regd) wiphy2_regd = cfg80211_regd; dfs_domain_same = wiphy1_regd->dfs_region == wiphy2_regd->dfs_region; rcu_read_unlock(); return dfs_domain_same; } static void reg_copy_dfs_chan_state(struct ieee80211_channel *dst_chan, struct ieee80211_channel *src_chan) { if (!(dst_chan->flags & IEEE80211_CHAN_RADAR) || !(src_chan->flags & IEEE80211_CHAN_RADAR)) return; if (dst_chan->flags & IEEE80211_CHAN_DISABLED || src_chan->flags & IEEE80211_CHAN_DISABLED) return; if (src_chan->center_freq == dst_chan->center_freq && dst_chan->dfs_state == NL80211_DFS_USABLE) { dst_chan->dfs_state = src_chan->dfs_state; dst_chan->dfs_state_entered = src_chan->dfs_state_entered; } } static void wiphy_share_dfs_chan_state(struct wiphy *dst_wiphy, struct wiphy *src_wiphy) { struct ieee80211_supported_band *src_sband, *dst_sband; struct ieee80211_channel *src_chan, *dst_chan; int i, j, band; if (!reg_dfs_domain_same(dst_wiphy, src_wiphy)) return; for (band = 0; band < NUM_NL80211_BANDS; band++) { dst_sband = dst_wiphy->bands[band]; src_sband = src_wiphy->bands[band]; if (!dst_sband || !src_sband) continue; for (i = 0; i < dst_sband->n_channels; i++) { dst_chan = &dst_sband->channels[i]; for (j = 0; j < src_sband->n_channels; j++) { src_chan = &src_sband->channels[j]; reg_copy_dfs_chan_state(dst_chan, src_chan); } } } } static void wiphy_all_share_dfs_chan_state(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev; 
ASSERT_RTNL(); for_each_rdev(rdev) { if (wiphy == &rdev->wiphy) continue; wiphy_share_dfs_chan_state(wiphy, &rdev->wiphy); } } /* This processes *all* regulatory hints */ static void reg_process_hint(struct regulatory_request *reg_request) { struct wiphy *wiphy = NULL; enum reg_request_treatment treatment; enum nl80211_reg_initiator initiator = reg_request->initiator; if (reg_request->wiphy_idx != WIPHY_IDX_INVALID) wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx); switch (initiator) { case NL80211_REGDOM_SET_BY_CORE: treatment = reg_process_hint_core(reg_request); break; case NL80211_REGDOM_SET_BY_USER: treatment = reg_process_hint_user(reg_request); break; case NL80211_REGDOM_SET_BY_DRIVER: if (!wiphy) goto out_free; treatment = reg_process_hint_driver(wiphy, reg_request); break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: if (!wiphy) goto out_free; treatment = reg_process_hint_country_ie(wiphy, reg_request); break; default: WARN(1, "invalid initiator %d\n", initiator); goto out_free; } if (treatment == REG_REQ_IGNORE) goto out_free; WARN(treatment != REG_REQ_OK && treatment != REG_REQ_ALREADY_SET, "unexpected treatment value %d\n", treatment); /* This is required so that the orig_* parameters are saved. * NOTE: treatment must be set for any case that reaches here! */ if (treatment == REG_REQ_ALREADY_SET && wiphy && wiphy->regulatory_flags & REGULATORY_STRICT_REG) { wiphy_update_regulatory(wiphy, initiator); wiphy_all_share_dfs_chan_state(wiphy); reg_check_channels(); } return; out_free: reg_free_request(reg_request); } static void notify_self_managed_wiphys(struct regulatory_request *request) { struct cfg80211_registered_device *rdev; struct wiphy *wiphy; for_each_rdev(rdev) { wiphy = &rdev->wiphy; if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED && request->initiator == NL80211_REGDOM_SET_BY_USER) reg_call_notifier(wiphy, request); } } /* * Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_* * Regulatory hints come on a first come first serve basis and we * must process each one atomically. 
*/ static void reg_process_pending_hints(void) { struct regulatory_request *reg_request, *lr; lr = get_last_request(); /* When last_request->processed becomes true this will be rescheduled */ if (lr && !lr->processed) { pr_debug("Pending regulatory request, waiting for it to be processed...\n"); return; } spin_lock(&reg_requests_lock); if (list_empty(&reg_requests_list)) { spin_unlock(&reg_requests_lock); return; } reg_request = list_first_entry(&reg_requests_list, struct regulatory_request, list); list_del_init(&reg_request->list); spin_unlock(&reg_requests_lock); notify_self_managed_wiphys(reg_request); reg_process_hint(reg_request); lr = get_last_request(); spin_lock(&reg_requests_lock); if (!list_empty(&reg_requests_list) && lr && lr->processed) schedule_work(&reg_work); spin_unlock(&reg_requests_lock); } /* Processes beacon hints -- this has nothing to do with country IEs */ static void reg_process_pending_beacon_hints(void) { struct cfg80211_registered_device *rdev; struct reg_beacon *pending_beacon, *tmp; /* This goes through the _pending_ beacon list */ spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(pending_beacon, tmp, &reg_pending_beacons, list) { list_del_init(&pending_beacon->list); /* Applies the beacon hint to current wiphys */ for_each_rdev(rdev) wiphy_update_new_beacon(&rdev->wiphy, pending_beacon); /* Remembers the beacon hint for new wiphys or reg changes */ list_add_tail(&pending_beacon->list, &reg_beacon_list); } spin_unlock_bh(&reg_pending_beacons_lock); } static void reg_process_self_managed_hint(struct wiphy *wiphy) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); const struct ieee80211_regdomain *tmp; const struct ieee80211_regdomain *regd; enum nl80211_band band; struct regulatory_request request = {}; ASSERT_RTNL(); lockdep_assert_wiphy(wiphy); spin_lock(&reg_requests_lock); regd = rdev->requested_regd; rdev->requested_regd = NULL; spin_unlock(&reg_requests_lock); if (!regd) return; tmp = get_wiphy_regdom(wiphy); rcu_assign_pointer(wiphy->regd, regd); rcu_free_regdom(tmp); for (band = 0; band < NUM_NL80211_BANDS; band++) handle_band_custom(wiphy, wiphy->bands[band], regd); reg_process_ht_flags(wiphy); request.wiphy_idx = get_wiphy_idx(wiphy); request.alpha2[0] = regd->alpha2[0]; request.alpha2[1] = regd->alpha2[1]; request.initiator = NL80211_REGDOM_SET_BY_DRIVER; if (wiphy->flags & WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER) reg_call_notifier(wiphy, &request); nl80211_send_wiphy_reg_change_event(&request); } static void reg_process_self_managed_hints(void) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); for_each_rdev(rdev) { wiphy_lock(&rdev->wiphy); reg_process_self_managed_hint(&rdev->wiphy); wiphy_unlock(&rdev->wiphy); } reg_check_channels(); } static void reg_todo(struct work_struct *work) { rtnl_lock(); reg_process_pending_hints(); reg_process_pending_beacon_hints(); reg_process_self_managed_hints(); rtnl_unlock(); } static void queue_regulatory_request(struct regulatory_request *request) { request->alpha2[0] = toupper(request->alpha2[0]); request->alpha2[1] = toupper(request->alpha2[1]); spin_lock(&reg_requests_lock); list_add_tail(&request->list, &reg_requests_list); spin_unlock(&reg_requests_lock); schedule_work(&reg_work); } /* * Core regulatory hint -- happens during cfg80211_init() * and when we restore regulatory settings. 
*/ static int regulatory_hint_core(const char *alpha2) { struct regulatory_request *request; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_CORE; request->wiphy_idx = WIPHY_IDX_INVALID; queue_regulatory_request(request); return 0; } /* User hints */ int regulatory_hint_user(const char *alpha2, enum nl80211_user_reg_hint_type user_reg_hint_type) { struct regulatory_request *request; if (WARN_ON(!alpha2)) return -EINVAL; if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2)) return -EINVAL; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->wiphy_idx = WIPHY_IDX_INVALID; request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_USER; request->user_reg_hint_type = user_reg_hint_type; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); return 0; } void regulatory_hint_indoor(bool is_indoor, u32 portid) { spin_lock(&reg_indoor_lock); /* It is possible that more than one user space process is trying to * configure the indoor setting. To handle such cases, clear the indoor * setting in case that some process does not think that the device * is operating in an indoor environment. In addition, if a user space * process indicates that it is controlling the indoor setting, save its * portid, i.e., make it the owner. */ reg_is_indoor = is_indoor; if (reg_is_indoor) { if (!reg_is_indoor_portid) reg_is_indoor_portid = portid; } else { reg_is_indoor_portid = 0; } spin_unlock(&reg_indoor_lock); if (!is_indoor) reg_check_channels(); } void regulatory_netlink_notify(u32 portid) { spin_lock(&reg_indoor_lock); if (reg_is_indoor_portid != portid) { spin_unlock(&reg_indoor_lock); return; } reg_is_indoor = false; reg_is_indoor_portid = 0; spin_unlock(&reg_indoor_lock); reg_check_channels(); } /* Driver hints */ int regulatory_hint(struct wiphy *wiphy, const char *alpha2) { struct regulatory_request *request; if (WARN_ON(!alpha2 || !wiphy)) return -EINVAL; wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG; request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); if (!request) return -ENOMEM; request->wiphy_idx = get_wiphy_idx(wiphy); request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_DRIVER; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); return 0; } EXPORT_SYMBOL(regulatory_hint); void regulatory_hint_country_ie(struct wiphy *wiphy, enum nl80211_band band, const u8 *country_ie, u8 country_ie_len) { char alpha2[2]; enum environment_cap env = ENVIRON_ANY; struct regulatory_request *request = NULL, *lr; /* IE len must be evenly divisible by 2 */ if (country_ie_len & 0x01) return; if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) return; request = kzalloc(sizeof(*request), GFP_KERNEL); if (!request) return; alpha2[0] = country_ie[0]; alpha2[1] = country_ie[1]; if (country_ie[2] == 'I') env = ENVIRON_INDOOR; else if (country_ie[2] == 'O') env = ENVIRON_OUTDOOR; rcu_read_lock(); lr = get_last_request(); if (unlikely(!lr)) goto out; /* * We will run this only upon a successful connection on cfg80211. * We leave conflict resolution to the workqueue, where can hold * the RTNL. 
*/ if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && lr->wiphy_idx != WIPHY_IDX_INVALID) goto out; request->wiphy_idx = get_wiphy_idx(wiphy); request->alpha2[0] = alpha2[0]; request->alpha2[1] = alpha2[1]; request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE; request->country_ie_env = env; /* Allow calling CRDA again */ reset_crda_timeouts(); queue_regulatory_request(request); request = NULL; out: kfree(request); rcu_read_unlock(); } static void restore_alpha2(char *alpha2, bool reset_user) { /* indicates there is no alpha2 to consider for restoration */ alpha2[0] = '9'; alpha2[1] = '7'; /* The user setting has precedence over the module parameter */ if (is_user_regdom_saved()) { /* Unless we're asked to ignore it and reset it */ if (reset_user) { pr_debug("Restoring regulatory settings including user preference\n"); user_alpha2[0] = '9'; user_alpha2[1] = '7'; /* * If we're ignoring user settings, we still need to * check the module parameter to ensure we put things * back as they were for a full restore. */ if (!is_world_regdom(ieee80211_regdom)) { pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n", ieee80211_regdom[0], ieee80211_regdom[1]); alpha2[0] = ieee80211_regdom[0]; alpha2[1] = ieee80211_regdom[1]; } } else { pr_debug("Restoring regulatory settings while preserving user preference for: %c%c\n", user_alpha2[0], user_alpha2[1]); alpha2[0] = user_alpha2[0]; alpha2[1] = user_alpha2[1]; } } else if (!is_world_regdom(ieee80211_regdom)) { pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n", ieee80211_regdom[0], ieee80211_regdom[1]); alpha2[0] = ieee80211_regdom[0]; alpha2[1] = ieee80211_regdom[1]; } else pr_debug("Restoring regulatory settings\n"); } static void restore_custom_reg_settings(struct wiphy *wiphy) { struct ieee80211_supported_band *sband; enum nl80211_band band; struct ieee80211_channel *chan; int i; for (band = 0; band < NUM_NL80211_BANDS; band++) { sband = wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { chan = &sband->channels[i]; chan->flags = chan->orig_flags; chan->max_antenna_gain = chan->orig_mag; chan->max_power = chan->orig_mpwr; chan->beacon_found = false; } } } /* * Restoring regulatory settings involves ignoring any * possibly stale country IE information and user regulatory * settings if so desired, this includes any beacon hints * learned as we could have traveled outside to another country * after disconnection. To restore regulatory settings we do * exactly what we did at bootup: * * - send a core regulatory hint * - send a user regulatory hint if applicable * * Device drivers that send a regulatory hint for a specific country * keep their own regulatory domain on wiphy->regd so that does * not need to be remembered. */ static void restore_regulatory_settings(bool reset_user, bool cached) { char alpha2[2]; char world_alpha2[2]; struct reg_beacon *reg_beacon, *btmp; LIST_HEAD(tmp_reg_req_list); struct cfg80211_registered_device *rdev; ASSERT_RTNL(); /* * Clear the indoor setting in case that it is not controlled by user * space, as otherwise there is no guarantee that the device is still * operating in an indoor environment. 
*/ spin_lock(&reg_indoor_lock); if (reg_is_indoor && !reg_is_indoor_portid) { reg_is_indoor = false; reg_check_channels(); } spin_unlock(&reg_indoor_lock); reset_regdomains(true, &world_regdom); restore_alpha2(alpha2, reset_user); /* * If there's any pending requests we simply * stash them to a temporary pending queue and * add then after we've restored regulatory * settings. */ spin_lock(&reg_requests_lock); list_splice_tail_init(&reg_requests_list, &tmp_reg_req_list); spin_unlock(&reg_requests_lock); /* Clear beacon hints */ spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } spin_unlock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } /* First restore to the basic regulatory settings */ world_alpha2[0] = cfg80211_world_regdom->alpha2[0]; world_alpha2[1] = cfg80211_world_regdom->alpha2[1]; for_each_rdev(rdev) { if (rdev->wiphy.regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) continue; if (rdev->wiphy.regulatory_flags & REGULATORY_CUSTOM_REG) restore_custom_reg_settings(&rdev->wiphy); } if (cached && (!is_an_alpha2(alpha2) || !IS_ERR_OR_NULL(cfg80211_user_regdom))) { reset_regdomains(false, cfg80211_world_regdom); update_all_wiphy_regulatory(NL80211_REGDOM_SET_BY_CORE); print_regdomain(get_cfg80211_regdom()); nl80211_send_reg_change_event(&core_request_world); reg_set_request_processed(); if (is_an_alpha2(alpha2) && !regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER)) { struct regulatory_request *ureq; spin_lock(&reg_requests_lock); ureq = list_last_entry(&reg_requests_list, struct regulatory_request, list); list_del(&ureq->list); spin_unlock(&reg_requests_lock); notify_self_managed_wiphys(ureq); reg_update_last_request(ureq); set_regdom(reg_copy_regd(cfg80211_user_regdom), REGD_SOURCE_CACHED); } } else { regulatory_hint_core(world_alpha2); /* * This restores the ieee80211_regdom module parameter * preference or the last user requested regulatory * settings, user regulatory settings takes precedence. */ if (is_an_alpha2(alpha2)) regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER); } spin_lock(&reg_requests_lock); list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list); spin_unlock(&reg_requests_lock); pr_debug("Kicking the queue\n"); schedule_work(&reg_work); } static bool is_wiphy_all_set_reg_flag(enum ieee80211_regulatory_flags flag) { struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; for_each_rdev(rdev) { wiphy_lock(&rdev->wiphy); list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { if (!(wdev->wiphy->regulatory_flags & flag)) { wiphy_unlock(&rdev->wiphy); return false; } } wiphy_unlock(&rdev->wiphy); } return true; } void regulatory_hint_disconnect(void) { /* Restore of regulatory settings is not required when wiphy(s) * ignore IE from connected access point but clearance of beacon hints * is required when wiphy(s) supports beacon hints. 
*/ if (is_wiphy_all_set_reg_flag(REGULATORY_COUNTRY_IE_IGNORE)) { struct reg_beacon *reg_beacon, *btmp; if (is_wiphy_all_set_reg_flag(REGULATORY_DISABLE_BEACON_HINTS)) return; spin_lock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } spin_unlock_bh(&reg_pending_beacons_lock); list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) { list_del(&reg_beacon->list); kfree(reg_beacon); } return; } pr_debug("All devices are disconnected, going to restore regulatory settings\n"); restore_regulatory_settings(false, true); } static bool freq_is_chan_12_13_14(u32 freq) { if (freq == ieee80211_channel_to_frequency(12, NL80211_BAND_2GHZ) || freq == ieee80211_channel_to_frequency(13, NL80211_BAND_2GHZ) || freq == ieee80211_channel_to_frequency(14, NL80211_BAND_2GHZ)) return true; return false; } static bool pending_reg_beacon(struct ieee80211_channel *beacon_chan) { struct reg_beacon *pending_beacon; list_for_each_entry(pending_beacon, &reg_pending_beacons, list) if (ieee80211_channel_equal(beacon_chan, &pending_beacon->chan)) return true; return false; } void regulatory_hint_found_beacon(struct wiphy *wiphy, struct ieee80211_channel *beacon_chan, gfp_t gfp) { struct reg_beacon *reg_beacon; bool processing; if (beacon_chan->beacon_found || beacon_chan->flags & IEEE80211_CHAN_RADAR || (beacon_chan->band == NL80211_BAND_2GHZ && !freq_is_chan_12_13_14(beacon_chan->center_freq))) return; spin_lock_bh(&reg_pending_beacons_lock); processing = pending_reg_beacon(beacon_chan); spin_unlock_bh(&reg_pending_beacons_lock); if (processing) return; reg_beacon = kzalloc(sizeof(struct reg_beacon), gfp); if (!reg_beacon) return; pr_debug("Found new beacon on frequency: %d.%03d MHz (Ch %d) on %s\n", beacon_chan->center_freq, beacon_chan->freq_offset, ieee80211_freq_khz_to_channel( ieee80211_channel_to_khz(beacon_chan)), wiphy_name(wiphy)); memcpy(&reg_beacon->chan, beacon_chan, sizeof(struct ieee80211_channel)); /* * Since we can be called from BH or and non-BH context * we must use spin_lock_bh() */ spin_lock_bh(&reg_pending_beacons_lock); list_add_tail(&reg_beacon->list, &reg_pending_beacons); spin_unlock_bh(&reg_pending_beacons_lock); schedule_work(&reg_work); } static void print_rd_rules(const struct ieee80211_regdomain *rd) { unsigned int i; const struct ieee80211_reg_rule *reg_rule = NULL; const struct ieee80211_freq_range *freq_range = NULL; const struct ieee80211_power_rule *power_rule = NULL; char bw[32], cac_time[32]; pr_debug(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n"); for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; freq_range = &reg_rule->freq_range; power_rule = &reg_rule->power_rule; if (reg_rule->flags & NL80211_RRF_AUTO_BW) snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO", freq_range->max_bandwidth_khz, reg_get_max_bandwidth(rd, reg_rule)); else snprintf(bw, sizeof(bw), "%d KHz", freq_range->max_bandwidth_khz); if (reg_rule->flags & NL80211_RRF_DFS) scnprintf(cac_time, sizeof(cac_time), "%u s", reg_rule->dfs_cac_ms/1000); else scnprintf(cac_time, sizeof(cac_time), "N/A"); /* * There may not be documentation for max antenna gain * in certain regions */ if (power_rule->max_antenna_gain) pr_debug(" (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, bw, power_rule->max_antenna_gain, power_rule->max_eirp, cac_time); else pr_debug(" (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n", 
freq_range->start_freq_khz, freq_range->end_freq_khz, bw, power_rule->max_eirp, cac_time); } } bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region) { switch (dfs_region) { case NL80211_DFS_UNSET: case NL80211_DFS_FCC: case NL80211_DFS_ETSI: case NL80211_DFS_JP: return true; default: pr_debug("Ignoring unknown DFS master region: %d\n", dfs_region); return false; } } static void print_regdomain(const struct ieee80211_regdomain *rd) { struct regulatory_request *lr = get_last_request(); if (is_intersected_alpha2(rd->alpha2)) { if (lr->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) { struct cfg80211_registered_device *rdev; rdev = cfg80211_rdev_by_wiphy_idx(lr->wiphy_idx); if (rdev) { pr_debug("Current regulatory domain updated by AP to: %c%c\n", rdev->country_ie_alpha2[0], rdev->country_ie_alpha2[1]); } else pr_debug("Current regulatory domain intersected:\n"); } else pr_debug("Current regulatory domain intersected:\n"); } else if (is_world_regdom(rd->alpha2)) { pr_debug("World regulatory domain updated:\n"); } else { if (is_unknown_alpha2(rd->alpha2)) pr_debug("Regulatory domain changed to driver built-in settings (unknown country)\n"); else { if (reg_request_cell_base(lr)) pr_debug("Regulatory domain changed to country: %c%c by Cell Station\n", rd->alpha2[0], rd->alpha2[1]); else pr_debug("Regulatory domain changed to country: %c%c\n", rd->alpha2[0], rd->alpha2[1]); } } pr_debug(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region)); print_rd_rules(rd); } static void print_regdomain_info(const struct ieee80211_regdomain *rd) { pr_debug("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_rd_rules(rd); } static int reg_set_rd_core(const struct ieee80211_regdomain *rd) { if (!is_world_regdom(rd->alpha2)) return -EINVAL; update_world_regdomain(rd); return 0; } static int reg_set_rd_user(const struct ieee80211_regdomain *rd, struct regulatory_request *user_request) { const struct ieee80211_regdomain *intersected_rd = NULL; if (!regdom_changes(rd->alpha2)) return -EALREADY; if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } if (!user_request->intersect) { reset_regdomains(false, rd); return 0; } intersected_rd = regdom_intersect(rd, get_cfg80211_regdom()); if (!intersected_rd) return -EINVAL; kfree(rd); rd = NULL; reset_regdomains(false, intersected_rd); return 0; } static int reg_set_rd_driver(const struct ieee80211_regdomain *rd, struct regulatory_request *driver_request) { const struct ieee80211_regdomain *regd; const struct ieee80211_regdomain *intersected_rd = NULL; const struct ieee80211_regdomain *tmp = NULL; struct wiphy *request_wiphy; if (is_world_regdom(rd->alpha2)) return -EINVAL; if (!regdom_changes(rd->alpha2)) return -EALREADY; if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx); if (!request_wiphy) return -ENODEV; if (!driver_request->intersect) { ASSERT_RTNL(); wiphy_lock(request_wiphy); if (request_wiphy->regd) tmp = get_wiphy_regdom(request_wiphy); regd = reg_copy_regd(rd); if (IS_ERR(regd)) { wiphy_unlock(request_wiphy); return PTR_ERR(regd); } rcu_assign_pointer(request_wiphy->regd, regd); rcu_free_regdom(tmp); wiphy_unlock(request_wiphy); reset_regdomains(false, rd); return 0; } intersected_rd = regdom_intersect(rd, get_cfg80211_regdom()); if (!intersected_rd) return -EINVAL; /* * We 
can trash what CRDA provided now. * However if a driver requested this specific regulatory * domain we keep it for its private use */ tmp = get_wiphy_regdom(request_wiphy); rcu_assign_pointer(request_wiphy->regd, rd); rcu_free_regdom(tmp); rd = NULL; reset_regdomains(false, intersected_rd); return 0; } static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd, struct regulatory_request *country_ie_request) { struct wiphy *request_wiphy; if (!is_alpha2_set(rd->alpha2) && !is_an_alpha2(rd->alpha2) && !is_unknown_alpha2(rd->alpha2)) return -EINVAL; /* * Lets only bother proceeding on the same alpha2 if the current * rd is non static (it means CRDA was present and was used last) * and the pending request came in from a country IE */ if (!is_valid_rd(rd)) { pr_err("Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx); if (!request_wiphy) return -ENODEV; if (country_ie_request->intersect) return -EINVAL; reset_regdomains(false, rd); return 0; } /* * Use this call to set the current regulatory domain. Conflicts with * multiple drivers can be ironed out later. Caller must've already * kmalloc'd the rd structure. */ int set_regdom(const struct ieee80211_regdomain *rd, enum ieee80211_regd_source regd_src) { struct regulatory_request *lr; bool user_reset = false; int r; if (IS_ERR_OR_NULL(rd)) return -ENODATA; if (!reg_is_valid_request(rd->alpha2)) { kfree(rd); return -EINVAL; } if (regd_src == REGD_SOURCE_CRDA) reset_crda_timeouts(); lr = get_last_request(); /* Note that this doesn't update the wiphys, this is done below */ switch (lr->initiator) { case NL80211_REGDOM_SET_BY_CORE: r = reg_set_rd_core(rd); break; case NL80211_REGDOM_SET_BY_USER: cfg80211_save_user_regdom(rd); r = reg_set_rd_user(rd, lr); user_reset = true; break; case NL80211_REGDOM_SET_BY_DRIVER: r = reg_set_rd_driver(rd, lr); break; case NL80211_REGDOM_SET_BY_COUNTRY_IE: r = reg_set_rd_country_ie(rd, lr); break; default: WARN(1, "invalid initiator %d\n", lr->initiator); kfree(rd); return -EINVAL; } if (r) { switch (r) { case -EALREADY: reg_set_request_processed(); break; default: /* Back to world regulatory in case of errors */ restore_regulatory_settings(user_reset, false); } kfree(rd); return r; } /* This would make this whole thing pointless */ if (WARN_ON(!lr->intersect && rd != get_cfg80211_regdom())) return -EINVAL; /* update all wiphys now with the new established regulatory domain */ update_all_wiphy_regulatory(lr->initiator); print_regdomain(get_cfg80211_regdom()); nl80211_send_reg_change_event(lr); reg_set_request_processed(); return 0; } static int __regulatory_set_wiphy_regd(struct wiphy *wiphy, struct ieee80211_regdomain *rd) { const struct ieee80211_regdomain *regd; const struct ieee80211_regdomain *prev_regd; struct cfg80211_registered_device *rdev; if (WARN_ON(!wiphy || !rd)) return -EINVAL; if (WARN(!(wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED), "wiphy should have REGULATORY_WIPHY_SELF_MANAGED\n")) return -EPERM; if (WARN(!is_valid_rd(rd), "Invalid regulatory domain detected: %c%c\n", rd->alpha2[0], rd->alpha2[1])) { print_regdomain_info(rd); return -EINVAL; } regd = reg_copy_regd(rd); if (IS_ERR(regd)) return PTR_ERR(regd); rdev = wiphy_to_rdev(wiphy); spin_lock(&reg_requests_lock); prev_regd = rdev->requested_regd; rdev->requested_regd = regd; spin_unlock(&reg_requests_lock); kfree(prev_regd); return 0; } int regulatory_set_wiphy_regd(struct wiphy *wiphy, 
struct ieee80211_regdomain *rd) { int ret = __regulatory_set_wiphy_regd(wiphy, rd); if (ret) return ret; schedule_work(&reg_work); return 0; } EXPORT_SYMBOL(regulatory_set_wiphy_regd); int regulatory_set_wiphy_regd_sync(struct wiphy *wiphy, struct ieee80211_regdomain *rd) { int ret; ASSERT_RTNL(); ret = __regulatory_set_wiphy_regd(wiphy, rd); if (ret) return ret; /* process the request immediately */ reg_process_self_managed_hint(wiphy); reg_check_channels(); return 0; } EXPORT_SYMBOL(regulatory_set_wiphy_regd_sync); void wiphy_regulatory_register(struct wiphy *wiphy) { struct regulatory_request *lr = get_last_request(); /* self-managed devices ignore beacon hints and country IE */ if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) { wiphy->regulatory_flags |= REGULATORY_DISABLE_BEACON_HINTS | REGULATORY_COUNTRY_IE_IGNORE; /* * The last request may have been received before this * registration call. Call the driver notifier if * initiator is USER. */ if (lr->initiator == NL80211_REGDOM_SET_BY_USER) reg_call_notifier(wiphy, lr); } if (!reg_dev_ignore_cell_hint(wiphy)) reg_num_devs_support_basehint++; wiphy_update_regulatory(wiphy, lr->initiator); wiphy_all_share_dfs_chan_state(wiphy); reg_process_self_managed_hints(); } void wiphy_regulatory_deregister(struct wiphy *wiphy) { struct wiphy *request_wiphy = NULL; struct regulatory_request *lr; lr = get_last_request(); if (!reg_dev_ignore_cell_hint(wiphy)) reg_num_devs_support_basehint--; rcu_free_regdom(get_wiphy_regdom(wiphy)); RCU_INIT_POINTER(wiphy->regd, NULL); if (lr) request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx); if (!request_wiphy || request_wiphy != wiphy) return; lr->wiphy_idx = WIPHY_IDX_INVALID; lr->country_ie_env = ENVIRON_ANY; } /* * See FCC notices for UNII band definitions * 5GHz: https://www.fcc.gov/document/5-ghz-unlicensed-spectrum-unii * 6GHz: https://www.fcc.gov/document/fcc-proposes-more-spectrum-unlicensed-use-0 */ int cfg80211_get_unii(int freq) { /* UNII-1 */ if (freq >= 5150 && freq <= 5250) return 0; /* UNII-2A */ if (freq > 5250 && freq <= 5350) return 1; /* UNII-2B */ if (freq > 5350 && freq <= 5470) return 2; /* UNII-2C */ if (freq > 5470 && freq <= 5725) return 3; /* UNII-3 */ if (freq > 5725 && freq <= 5825) return 4; /* UNII-5 */ if (freq > 5925 && freq <= 6425) return 5; /* UNII-6 */ if (freq > 6425 && freq <= 6525) return 6; /* UNII-7 */ if (freq > 6525 && freq <= 6875) return 7; /* UNII-8 */ if (freq > 6875 && freq <= 7125) return 8; return -EINVAL; } bool regulatory_indoor_allowed(void) { return reg_is_indoor; } bool regulatory_pre_cac_allowed(struct wiphy *wiphy) { const struct ieee80211_regdomain *regd = NULL; const struct ieee80211_regdomain *wiphy_regd = NULL; bool pre_cac_allowed = false; rcu_read_lock(); regd = rcu_dereference(cfg80211_regdomain); wiphy_regd = rcu_dereference(wiphy->regd); if (!wiphy_regd) { if (regd->dfs_region == NL80211_DFS_ETSI) pre_cac_allowed = true; rcu_read_unlock(); return pre_cac_allowed; } if (regd->dfs_region == wiphy_regd->dfs_region && wiphy_regd->dfs_region == NL80211_DFS_ETSI) pre_cac_allowed = true; rcu_read_unlock(); return pre_cac_allowed; } EXPORT_SYMBOL(regulatory_pre_cac_allowed); static void cfg80211_check_and_end_cac(struct cfg80211_registered_device *rdev) { struct wireless_dev *wdev; unsigned int link_id; /* If we finished CAC or received radar, we should end any * CAC running on the same channels. 
* the check !cfg80211_chandef_dfs_usable contain 2 options: * either all channels are available - those the CAC_FINISHED * event has effected another wdev state, or there is a channel * in unavailable state in wdev chandef - those the RADAR_DETECTED * event has effected another wdev state. * In both cases we should end the CAC on the wdev. */ list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) { struct cfg80211_chan_def *chandef; for_each_valid_link(wdev, link_id) { if (!wdev->links[link_id].cac_started) continue; chandef = wdev_chandef(wdev, link_id); if (!chandef) continue; if (!cfg80211_chandef_dfs_usable(&rdev->wiphy, chandef)) rdev_end_cac(rdev, wdev->netdev, link_id); } } } void regulatory_propagate_dfs_state(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_dfs_state dfs_state, enum nl80211_radar_event event) { struct cfg80211_registered_device *rdev; ASSERT_RTNL(); if (WARN_ON(!cfg80211_chandef_valid(chandef))) return; for_each_rdev(rdev) { if (wiphy == &rdev->wiphy) continue; if (!reg_dfs_domain_same(wiphy, &rdev->wiphy)) continue; if (!ieee80211_get_channel(&rdev->wiphy, chandef->chan->center_freq)) continue; cfg80211_set_dfs_state(&rdev->wiphy, chandef, dfs_state); if (event == NL80211_RADAR_DETECTED || event == NL80211_RADAR_CAC_FINISHED) { cfg80211_sched_dfs_chan_update(rdev); cfg80211_check_and_end_cac(rdev); } nl80211_radar_notify(rdev, chandef, event, NULL, GFP_KERNEL); } } static int __init regulatory_init_db(void) { int err; /* * It's possible that - due to other bugs/issues - cfg80211 * never called regulatory_init() below, or that it failed; * in that case, don't try to do any further work here as * it's doomed to lead to crashes. */ if (IS_ERR_OR_NULL(reg_pdev)) return -EINVAL; err = load_builtin_regdb_keys(); if (err) { platform_device_unregister(reg_pdev); return err; } /* We always try to get an update for the static regdomain */ err = regulatory_hint_core(cfg80211_world_regdom->alpha2); if (err) { if (err == -ENOMEM) { platform_device_unregister(reg_pdev); return err; } /* * N.B. kobject_uevent_env() can fail mainly for when we're out * memory which is handled and propagated appropriately above * but it can also fail during a netlink_broadcast() or during * early boot for call_usermodehelper(). For now treat these * errors as non-fatal. */ pr_err("kobject_uevent_env() was unable to call CRDA during init\n"); } /* * Finally, if the user set the module parameter treat it * as a user hint. 
 */
	if (!is_world_regdom(ieee80211_regdom))
		regulatory_hint_user(ieee80211_regdom,
				     NL80211_USER_REG_HINT_USER);

	return 0;
}
#ifndef MODULE
late_initcall(regulatory_init_db);
#endif

int __init regulatory_init(void)
{
	reg_pdev = platform_device_register_simple("regulatory", 0, NULL, 0);
	if (IS_ERR(reg_pdev))
		return PTR_ERR(reg_pdev);

	rcu_assign_pointer(cfg80211_regdomain, cfg80211_world_regdom);

	user_alpha2[0] = '9';
	user_alpha2[1] = '7';

#ifdef MODULE
	return regulatory_init_db();
#else
	return 0;
#endif
}

void regulatory_exit(void)
{
	struct regulatory_request *reg_request, *tmp;
	struct reg_beacon *reg_beacon, *btmp;

	cancel_work_sync(&reg_work);
	cancel_crda_timeout_sync();
	cancel_delayed_work_sync(&reg_check_chans);

	/* Lock to suppress warnings */
	rtnl_lock();
	reset_regdomains(true, NULL);
	rtnl_unlock();

	dev_set_uevent_suppress(&reg_pdev->dev, true);

	platform_device_unregister(reg_pdev);

	list_for_each_entry_safe(reg_beacon, btmp, &reg_pending_beacons, list) {
		list_del(&reg_beacon->list);
		kfree(reg_beacon);
	}

	list_for_each_entry_safe(reg_beacon, btmp, &reg_beacon_list, list) {
		list_del(&reg_beacon->list);
		kfree(reg_beacon);
	}

	list_for_each_entry_safe(reg_request, tmp, &reg_requests_list, list) {
		list_del(&reg_request->list);
		kfree(reg_request);
	}

	if (!IS_ERR_OR_NULL(regdb))
		kfree(regdb);
	if (!IS_ERR_OR_NULL(cfg80211_user_regdom))
		kfree(cfg80211_user_regdom);

	free_regdb_keyring();
}
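The driver-facing entry points above (wiphy_apply_custom_regulatory(), regulatory_hint(), regulatory_set_wiphy_regd()) are easier to follow with a concrete caller. Below is a minimal, hypothetical sketch of a driver applying a custom regulatory domain before wiphy_register(); the REG_RULE() initializer comes from <net/regulatory.h>, while the example_* names, the "99" alpha2 and the frequency/power values are illustrative assumptions and are not taken from reg.c.

/* Hypothetical driver-side sketch, not part of reg.c; values illustrative. */
static const struct ieee80211_regdomain example_custom_regd = {
	.n_reg_rules = 2,
	.alpha2 = "99",		/* "99" is the unknown/built-in alpha2 */
	.reg_rules = {
		/* 2402..2472 MHz, max 40 MHz BW, no gain cap, 20 dBm EIRP */
		REG_RULE(2402, 2472, 40, 0, 20, 0),
		/* 5170..5250 MHz, max 80 MHz BW, no gain cap, 20 dBm EIRP */
		REG_RULE(5170, 5250, 80, 0, 20, 0),
	},
};

static void example_apply_custom_regd(struct wiphy *wiphy)
{
	/* Opt out of core regulatory updates; the driver owns the rules. */
	wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG;

	/* Must run prior to wiphy registration, as noted above. */
	wiphy_apply_custom_regulatory(wiphy, &example_custom_regd);
}

A self-managed device (REGULATORY_WIPHY_SELF_MANAGED) would instead hand cfg80211 a full regdomain at runtime through regulatory_set_wiphy_regd() or regulatory_set_wiphy_regd_sync(), as implemented above.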
// SPDX-License-Identifier: GPL-2.0
/*
 * USB Serial Converter stuff
 *
 * Copyright (C) 1999 - 2012
 *	Greg Kroah-Hartman (greg@kroah.com)
 */

#ifndef __LINUX_USB_SERIAL_H
#define __LINUX_USB_SERIAL_H

#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/serial.h>
#include <linux/kfifo.h>

/* The maximum number of ports one device can grab at once */
#define MAX_NUM_PORTS		16

/* USB serial flags */
#define USB_SERIAL_WRITE_BUSY	0
#define USB_SERIAL_THROTTLED	1

/**
 * usb_serial_port: structure for the specific ports of a device.
 * @serial: pointer back to the struct usb_serial owner of this port.
 * @port: pointer to the corresponding tty_port for this port.
 * @lock: spinlock to grab when updating portions of this structure.
 * @minor: the minor number of the port
 * @port_number: the struct usb_serial port number of this port (starts at 0)
 * @interrupt_in_buffer: pointer to the interrupt in buffer for this port.
 * @interrupt_in_urb: pointer to the interrupt in struct urb for this port.
 * @interrupt_in_endpointAddress: endpoint address for the interrupt in pipe
 *	for this port.
 * @interrupt_out_buffer: pointer to the interrupt out buffer for this port.
 * @interrupt_out_size: the size of the interrupt_out_buffer, in bytes.
 * @interrupt_out_urb: pointer to the interrupt out struct urb for this port.
 * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe
 *	for this port.
 * @bulk_in_buffer: pointer to the bulk in buffer for this port.
 * @bulk_in_size: the size of the bulk_in_buffer, in bytes.
 * @read_urb: pointer to the bulk in struct urb for this port.
 * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this
 *	port.
* @bulk_in_buffers: pointers to the bulk in buffers for this port * @read_urbs: pointers to the bulk in urbs for this port * @read_urbs_free: status bitmap the for bulk in urbs * @bulk_out_buffer: pointer to the bulk out buffer for this port. * @bulk_out_size: the size of the bulk_out_buffer, in bytes. * @write_urb: pointer to the bulk out struct urb for this port. * @write_fifo: kfifo used to buffer outgoing data * @bulk_out_buffers: pointers to the bulk out buffers for this port * @write_urbs: pointers to the bulk out urbs for this port * @write_urbs_free: status bitmap the for bulk out urbs * @icount: interrupt counters * @tx_bytes: number of bytes currently in host stack queues * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this * port. * @flags: usb serial port flags * @work: work queue entry for the line discipline waking up. * @dev: pointer to the serial device * * This structure is used by the usb-serial core and drivers for the specific * ports of a device. */ struct usb_serial_port { struct usb_serial *serial; struct tty_port port; spinlock_t lock; u32 minor; u8 port_number; unsigned char *interrupt_in_buffer; struct urb *interrupt_in_urb; __u8 interrupt_in_endpointAddress; unsigned char *interrupt_out_buffer; int interrupt_out_size; struct urb *interrupt_out_urb; __u8 interrupt_out_endpointAddress; unsigned char *bulk_in_buffer; int bulk_in_size; struct urb *read_urb; __u8 bulk_in_endpointAddress; unsigned char *bulk_in_buffers[2]; struct urb *read_urbs[2]; unsigned long read_urbs_free; unsigned char *bulk_out_buffer; int bulk_out_size; struct urb *write_urb; struct kfifo write_fifo; unsigned char *bulk_out_buffers[2]; struct urb *write_urbs[2]; unsigned long write_urbs_free; __u8 bulk_out_endpointAddress; struct async_icount icount; int tx_bytes; unsigned long flags; struct work_struct work; unsigned long sysrq; /* sysrq timeout */ struct device dev; }; #define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev) /* get and set the port private data pointer helper functions */ static inline void *usb_get_serial_port_data(struct usb_serial_port *port) { return dev_get_drvdata(&port->dev); } static inline void usb_set_serial_port_data(struct usb_serial_port *port, void *data) { dev_set_drvdata(&port->dev, data); } /** * usb_serial - structure used by the usb-serial core for a device * @dev: pointer to the struct usb_device for this device * @type: pointer to the struct usb_serial_driver for this device * @interface: pointer to the struct usb_interface for this device * @sibling: pointer to the struct usb_interface of any sibling interface * @suspend_count: number of suspended (sibling) interfaces * @num_ports: the number of ports this device has * @num_interrupt_in: number of interrupt in endpoints we have * @num_interrupt_out: number of interrupt out endpoints we have * @num_bulk_in: number of bulk in endpoints we have * @num_bulk_out: number of bulk out endpoints we have * @port: array of struct usb_serial_port structures for the different ports. * @private: place to put any driver specific information that is needed. The * usb-serial driver is required to manage this data, the usb-serial core * will not touch this. Use usb_get_serial_data() and * usb_set_serial_data() to access this. 
*/ struct usb_serial { struct usb_device *dev; struct usb_serial_driver *type; struct usb_interface *interface; struct usb_interface *sibling; unsigned int suspend_count; unsigned char disconnected:1; unsigned char attached:1; unsigned char minors_reserved:1; unsigned char num_ports; unsigned char num_port_pointers; unsigned char num_interrupt_in; unsigned char num_interrupt_out; unsigned char num_bulk_in; unsigned char num_bulk_out; struct usb_serial_port *port[MAX_NUM_PORTS]; struct kref kref; struct mutex disc_mutex; void *private; }; #define to_usb_serial(d) container_of(d, struct usb_serial, kref) /* get and set the serial private data pointer helper functions */ static inline void *usb_get_serial_data(struct usb_serial *serial) { return serial->private; } static inline void usb_set_serial_data(struct usb_serial *serial, void *data) { serial->private = data; } struct usb_serial_endpoints { unsigned char num_bulk_in; unsigned char num_bulk_out; unsigned char num_interrupt_in; unsigned char num_interrupt_out; struct usb_endpoint_descriptor *bulk_in[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *bulk_out[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *interrupt_in[MAX_NUM_PORTS]; struct usb_endpoint_descriptor *interrupt_out[MAX_NUM_PORTS]; }; /** * usb_serial_driver - describes a usb serial driver * @description: pointer to a string that describes this driver. This string * used in the syslog messages when a device is inserted or removed. * @id_table: pointer to a list of usb_device_id structures that define all * of the devices this structure can support. * @num_ports: the number of different ports this device will have. * @num_bulk_in: minimum number of bulk-in endpoints * @num_bulk_out: minimum number of bulk-out endpoints * @num_interrupt_in: minimum number of interrupt-in endpoints * @num_interrupt_out: minimum number of interrupt-out endpoints * @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer * (0 = end-point size) * @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size) * @calc_num_ports: pointer to a function to determine how many ports this * device has dynamically. It can also be used to verify the number of * endpoints or to modify the port-endpoint mapping. It will be called * after the probe() callback is called, but before attach(). * @probe: pointer to the driver's probe function. * This will be called when the device is inserted into the system, * but before the device has been fully initialized by the usb_serial * subsystem. Use this function to download any firmware to the device, * or any other early initialization that might be needed. * Return 0 to continue on with the initialization sequence. Anything * else will abort it. * @attach: pointer to the driver's attach function. * This will be called when the struct usb_serial structure is fully * set up. Do any local initialization of the device, or any private * memory structure allocation at this point in time. * @disconnect: pointer to the driver's disconnect function. This will be * called when the device is unplugged or unbound from the driver. * @release: pointer to the driver's release function. This will be called * when the usb_serial data structure is about to be destroyed. * @usb_driver: pointer to the struct usb_driver that controls this * device. This is necessary to allow dynamic ids to be added to * the driver from sysfs. * * This structure is defines a USB Serial driver. It provides all of * the information that the USB serial core code needs. 
If the function * pointers are defined, then the USB serial core code will call them when * the corresponding tty port functions are called. If they are not * called, the generic serial function will be used instead. * * The driver.owner field should be set to the module owner of this driver. * The driver.name field should be set to the name of this driver (remember * it will show up in sysfs, so it needs to be short and to the point. * Using the module name is a good idea.) */ struct usb_serial_driver { const char *description; const struct usb_device_id *id_table; struct list_head driver_list; struct device_driver driver; struct usb_driver *usb_driver; struct usb_dynids dynids; unsigned char num_ports; unsigned char num_bulk_in; unsigned char num_bulk_out; unsigned char num_interrupt_in; unsigned char num_interrupt_out; size_t bulk_in_size; size_t bulk_out_size; int (*probe)(struct usb_serial *serial, const struct usb_device_id *id); int (*attach)(struct usb_serial *serial); int (*calc_num_ports)(struct usb_serial *serial, struct usb_serial_endpoints *epds); void (*disconnect)(struct usb_serial *serial); void (*release)(struct usb_serial *serial); int (*port_probe)(struct usb_serial_port *port); void (*port_remove)(struct usb_serial_port *port); int (*suspend)(struct usb_serial *serial, pm_message_t message); int (*resume)(struct usb_serial *serial); int (*reset_resume)(struct usb_serial *serial); /* serial function calls */ /* Called by console and by the tty layer */ int (*open)(struct tty_struct *tty, struct usb_serial_port *port); void (*close)(struct usb_serial_port *port); int (*write)(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); /* Called only by the tty layer */ unsigned int (*write_room)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); void (*get_serial)(struct tty_struct *tty, struct serial_struct *ss); int (*set_serial)(struct tty_struct *tty, struct serial_struct *ss); void (*set_termios)(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old); int (*break_ctl)(struct tty_struct *tty, int break_state); unsigned int (*chars_in_buffer)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, long timeout); bool (*tx_empty)(struct usb_serial_port *port); void (*throttle)(struct tty_struct *tty); void (*unthrottle)(struct tty_struct *tty); int (*tiocmget)(struct tty_struct *tty); int (*tiocmset)(struct tty_struct *tty, unsigned int set, unsigned int clear); int (*tiocmiwait)(struct tty_struct *tty, unsigned long arg); int (*get_icount)(struct tty_struct *tty, struct serial_icounter_struct *icount); /* Called by the tty layer for port level work. 
There may or may not be an attached tty at this point */ void (*dtr_rts)(struct usb_serial_port *port, int on); int (*carrier_raised)(struct usb_serial_port *port); /* Called by the usb serial hooks to allow the user to rework the termios state */ void (*init_termios)(struct tty_struct *tty); /* USB events */ void (*read_int_callback)(struct urb *urb); void (*write_int_callback)(struct urb *urb); void (*read_bulk_callback)(struct urb *urb); void (*write_bulk_callback)(struct urb *urb); /* Called by the generic read bulk callback */ void (*process_read_urb)(struct urb *urb); /* Called by the generic write implementation */ int (*prepare_write_buffer)(struct usb_serial_port *port, void *dest, size_t size); }; #define to_usb_serial_driver(d) \ container_of(d, struct usb_serial_driver, driver) #define usb_serial_register_drivers(serial_drivers, name, id_table) \ __usb_serial_register_drivers(serial_drivers, THIS_MODULE, name, id_table) int __usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[], struct module *owner, const char *name, const struct usb_device_id *id_table); void usb_serial_deregister_drivers(struct usb_serial_driver *const serial_drivers[]); void usb_serial_port_softint(struct usb_serial_port *port); int usb_serial_suspend(struct usb_interface *intf, pm_message_t message); int usb_serial_resume(struct usb_interface *intf); /* USB Serial console functions */ #ifdef CONFIG_USB_SERIAL_CONSOLE void usb_serial_console_init(int minor); void usb_serial_console_exit(void); void usb_serial_console_disconnect(struct usb_serial *serial); #else static inline void usb_serial_console_init(int minor) { } static inline void usb_serial_console_exit(void) { } static inline void usb_serial_console_disconnect(struct usb_serial *serial) {} #endif /* Functions needed by other parts of the usbserial core */ struct usb_serial_port *usb_serial_port_get_by_minor(unsigned int minor); void usb_serial_put(struct usb_serial *serial); int usb_serial_claim_interface(struct usb_serial *serial, struct usb_interface *intf); int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port); int usb_serial_generic_write_start(struct usb_serial_port *port, gfp_t mem_flags); int usb_serial_generic_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); void usb_serial_generic_close(struct usb_serial_port *port); int usb_serial_generic_resume(struct usb_serial *serial); unsigned int usb_serial_generic_write_room(struct tty_struct *tty); unsigned int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout); void usb_serial_generic_read_bulk_callback(struct urb *urb); void usb_serial_generic_write_bulk_callback(struct urb *urb); void usb_serial_generic_throttle(struct tty_struct *tty); void usb_serial_generic_unthrottle(struct tty_struct *tty); int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg); int usb_serial_generic_get_icount(struct tty_struct *tty, struct serial_icounter_struct *icount); int usb_serial_generic_register(void); void usb_serial_generic_deregister(void); int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port, gfp_t mem_flags); void usb_serial_generic_process_read_urb(struct urb *urb); int usb_serial_generic_prepare_write_buffer(struct usb_serial_port *port, void *dest, size_t size); #if defined(CONFIG_USB_SERIAL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) int usb_serial_handle_sysrq_char(struct 
usb_serial_port *port, unsigned int ch); int usb_serial_handle_break(struct usb_serial_port *port); #else static inline int usb_serial_handle_sysrq_char(struct usb_serial_port *port, unsigned int ch) { return 0; } static inline int usb_serial_handle_break(struct usb_serial_port *port) { return 0; } #endif void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port, struct tty_struct *tty, unsigned int status); int usb_serial_bus_register(struct usb_serial_driver *device); void usb_serial_bus_deregister(struct usb_serial_driver *device); extern const struct bus_type usb_serial_bus_type; extern struct tty_driver *usb_serial_tty_driver; static inline void usb_serial_debug_data(struct device *dev, const char *function, int size, const unsigned char *data) { dev_dbg(dev, "%s - length = %d, data = %*ph\n", function, size, size, data); } /* * Macro for reporting errors in write path to avoid infinite loop * when port is used as a console. */ #define dev_err_console(usport, fmt, ...) \ do { \ static bool __print_once; \ struct usb_serial_port *__port = (usport); \ \ if (!__port->port.console || !__print_once) { \ __print_once = true; \ dev_err(&__port->dev, fmt, ##__VA_ARGS__); \ } \ } while (0) /* * module_usb_serial_driver() - Helper macro for registering a USB Serial driver * @__serial_drivers: list of usb_serial drivers to register * @__ids: all device ids that @__serial_drivers bind to * * Helper macro for USB serial drivers which do not do anything special * in module init/exit. This eliminates a lot of boilerplate. Each * module may only use this macro once, and calling it replaces * module_init() and module_exit() * */ #define usb_serial_module_driver(__name, __serial_drivers, __ids) \ static int __init usb_serial_module_init(void) \ { \ return usb_serial_register_drivers(__serial_drivers, \ __name, __ids); \ } \ module_init(usb_serial_module_init); \ static void __exit usb_serial_module_exit(void) \ { \ usb_serial_deregister_drivers(__serial_drivers); \ } \ module_exit(usb_serial_module_exit); #define module_usb_serial_driver(__serial_drivers, __ids) \ usb_serial_module_driver(KBUILD_MODNAME, __serial_drivers, __ids) #endif /* __LINUX_USB_SERIAL_H */
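Taken together, the structures and the module_usb_serial_driver() macro above are all a simple device needs. The sketch below shows how a minimal usb-serial driver is typically wired up against this header; the vendor/product IDs, the driver name and the single-port assumption are placeholders, and a real driver would normally also supply open/close/write callbacks.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

/* placeholder VID/PID; replace with the real device IDs */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },
	{ }
};
MODULE_DEVICE_TABLE(usb, id_table);

static struct usb_serial_driver example_device = {
	.driver = {
		.name	= "example_serial",
	},
	.description	= "Example USB serial adapter",
	.id_table	= id_table,
	.num_ports	= 1,
};

static struct usb_serial_driver * const serial_drivers[] = {
	&example_device, NULL
};

/* expands to module_init()/module_exit() that register and deregister
 * the drivers array with the usb-serial core, as defined above */
module_usb_serial_driver(serial_drivers, id_table);

MODULE_LICENSE("GPL");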
// SPDX-License-Identifier: GPL-2.0 #include <linux/fs.h> #include <linux/export.h> /* * fs on-disk file type to dirent file type conversion */ static const unsigned char fs_dtype_by_ftype[FT_MAX] = { [FT_UNKNOWN] = DT_UNKNOWN, [FT_REG_FILE] = DT_REG, [FT_DIR] = DT_DIR, [FT_CHRDEV] = DT_CHR, [FT_BLKDEV] = DT_BLK, [FT_FIFO] = DT_FIFO, [FT_SOCK] = DT_SOCK, [FT_SYMLINK] = DT_LNK }; /** * fs_ftype_to_dtype() - fs on-disk file type to dirent type. * @filetype: The on-disk file type to convert. * * This function converts the on-disk file type value (FT_*) to the directory * entry type (DT_*). * * Context: Any context. * Return: * * DT_UNKNOWN - Unknown type * * DT_FIFO - FIFO * * DT_CHR - Character device * * DT_DIR - Directory * * DT_BLK - Block device * * DT_REG - Regular file * * DT_LNK - Symbolic link * * DT_SOCK - Local-domain socket */ unsigned char fs_ftype_to_dtype(unsigned int filetype) { if (filetype >= FT_MAX) return DT_UNKNOWN; return fs_dtype_by_ftype[filetype]; } EXPORT_SYMBOL_GPL(fs_ftype_to_dtype); /* * dirent file type to fs on-disk file type conversion * Values not initialized explicitly are FT_UNKNOWN (0). */ static const unsigned char fs_ftype_by_dtype[DT_MAX] = { [DT_REG] = FT_REG_FILE, [DT_DIR] = FT_DIR, [DT_LNK] = FT_SYMLINK, [DT_CHR] = FT_CHRDEV, [DT_BLK] = FT_BLKDEV, [DT_FIFO] = FT_FIFO, [DT_SOCK] = FT_SOCK, }; /** * fs_umode_to_ftype() - file mode to on-disk file type. * @mode: The file mode to convert. * * This function converts the file mode value to the on-disk file type (FT_*). * * Context: Any context. * Return: * * FT_UNKNOWN - Unknown type * * FT_REG_FILE - Regular file * * FT_DIR - Directory * * FT_CHRDEV - Character device * * FT_BLKDEV - Block device * * FT_FIFO - FIFO * * FT_SOCK - Local-domain socket * * FT_SYMLINK - Symbolic link */ unsigned char fs_umode_to_ftype(umode_t mode) { return fs_ftype_by_dtype[S_DT(mode)]; } EXPORT_SYMBOL_GPL(fs_umode_to_ftype); /** * fs_umode_to_dtype() - file mode to dirent file type. * @mode: The file mode to convert. * * This function converts the file mode value to the directory * entry type (DT_*). * * Context: Any context. * Return: * * DT_UNKNOWN - Unknown type * * DT_FIFO - FIFO * * DT_CHR - Character device * * DT_DIR - Directory * * DT_BLK - Block device * * DT_REG - Regular file * * DT_LNK - Symbolic link * * DT_SOCK - Local-domain socket */ unsigned char fs_umode_to_dtype(umode_t mode) { return fs_ftype_to_dtype(fs_umode_to_ftype(mode)); } EXPORT_SYMBOL_GPL(fs_umode_to_dtype);
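fs_ftype_to_dtype() and fs_umode_to_ftype() are the glue between a filesystem's on-disk FT_* type field and the DT_* values reported through getdents(). The fragment below is a hedged sketch of how a filesystem's readdir implementation commonly uses them when emitting directory entries; struct examplefs_dir_entry and examplefs_emit() are hypothetical names, not part of the kernel sources shown here.

#include <linux/fs.h>

struct examplefs_dir_entry {		/* hypothetical on-disk directory record */
	u64	ino;
	u8	name_len;
	u8	file_type;		/* stores one of the FT_* values */
	char	name[255];
};

static bool examplefs_emit(struct dir_context *ctx,
			   const struct examplefs_dir_entry *de)
{
	/* convert the on-disk FT_* value to the DT_* value that the VFS
	 * hands back to userspace, without touching the target inode */
	return dir_emit(ctx, de->name, de->name_len, de->ino,
			fs_ftype_to_dtype(de->file_type));
}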
/* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file * Copyright(c) 2016 Intel Deutschland GmbH * Copyright (C) 2018-2019, 2021-2024 Intel Corporation */ #ifndef __MAC80211_DRIVER_OPS #define __MAC80211_DRIVER_OPS #include <net/mac80211.h> #include "ieee80211_i.h" #include "trace.h" #define check_sdata_in_driver(sdata) ({ \ WARN_ONCE(!sdata->local->reconfig_failure && \ !(sdata->flags & IEEE80211_SDATA_IN_DRIVER), \ "%s: Failed check-sdata-in-driver check, flags: 0x%x\n", \ sdata->dev ? sdata->dev->name : sdata->name, sdata->flags); \ !!(sdata->flags & IEEE80211_SDATA_IN_DRIVER); \ }) static inline struct ieee80211_sub_if_data * get_bss_sdata(struct ieee80211_sub_if_data *sdata) { if (sdata && sdata->vif.type == NL80211_IFTYPE_AP_VLAN) sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap); return sdata; } static inline void drv_tx(struct ieee80211_local *local, struct ieee80211_tx_control *control, struct sk_buff *skb) { local->ops->tx(&local->hw, control, skb); } static inline void drv_sync_rx_queues(struct ieee80211_local *local, struct sta_info *sta) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (local->ops->sync_rx_queues) { trace_drv_sync_rx_queues(local, sta->sdata, &sta->sta); local->ops->sync_rx_queues(&local->hw); trace_drv_return_void(local); } } static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata, u32 sset, u8 *data) { struct ieee80211_local *local = sdata->local; if (local->ops->get_et_strings) { trace_drv_get_et_strings(local, sset); local->ops->get_et_strings(&local->hw, &sdata->vif, sset, data); trace_drv_return_void(local); } } static inline void drv_get_et_stats(struct ieee80211_sub_if_data *sdata, struct ethtool_stats *stats, u64 *data) { struct ieee80211_local *local = sdata->local; if (local->ops->get_et_stats) { trace_drv_get_et_stats(local); local->ops->get_et_stats(&local->hw, &sdata->vif, stats, data); trace_drv_return_void(local); } } static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata, int sset) { struct ieee80211_local *local = sdata->local; int rv = 0; if (local->ops->get_et_sset_count) { trace_drv_get_et_sset_count(local, sset); rv = local->ops->get_et_sset_count(&local->hw, &sdata->vif, sset); trace_drv_return_int(local, rv); } return rv; } int drv_start(struct ieee80211_local *local); void drv_stop(struct ieee80211_local *local, bool suspend); #ifdef CONFIG_PM static inline int drv_suspend(struct ieee80211_local *local, struct cfg80211_wowlan *wowlan) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_suspend(local); ret = local->ops->suspend(&local->hw, wowlan); trace_drv_return_int(local, ret); return ret; } static inline int
drv_resume(struct ieee80211_local *local) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_resume(local); ret = local->ops->resume(&local->hw); trace_drv_return_int(local, ret); return ret; } static inline void drv_set_wakeup(struct ieee80211_local *local, bool enabled) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!local->ops->set_wakeup) return; trace_drv_set_wakeup(local, enabled); local->ops->set_wakeup(&local->hw, enabled); trace_drv_return_void(local); } #endif int drv_add_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); int drv_change_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type, bool p2p); void drv_remove_interface(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); static inline int drv_config(struct ieee80211_local *local, u32 changed) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_config(local, changed); ret = local->ops->config(&local->hw, changed); trace_drv_return_int(local, ret); return ret; } static inline void drv_vif_cfg_changed(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u64 changed) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; trace_drv_vif_cfg_changed(local, sdata, changed); if (local->ops->vif_cfg_changed) local->ops->vif_cfg_changed(&local->hw, &sdata->vif, changed); else if (local->ops->bss_info_changed) local->ops->bss_info_changed(&local->hw, &sdata->vif, &sdata->vif.bss_conf, changed); trace_drv_return_void(local); } void drv_link_info_changed(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *info, int link_id, u64 changed); static inline u64 drv_prepare_multicast(struct ieee80211_local *local, struct netdev_hw_addr_list *mc_list) { u64 ret = 0; trace_drv_prepare_multicast(local, mc_list->count); if (local->ops->prepare_multicast) ret = local->ops->prepare_multicast(&local->hw, mc_list); trace_drv_return_u64(local, ret); return ret; } static inline void drv_configure_filter(struct ieee80211_local *local, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_configure_filter(local, changed_flags, total_flags, multicast); local->ops->configure_filter(&local->hw, changed_flags, total_flags, multicast); trace_drv_return_void(local); } static inline void drv_config_iface_filter(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, unsigned int filter_flags, unsigned int changed_flags) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_config_iface_filter(local, sdata, filter_flags, changed_flags); if (local->ops->config_iface_filter) local->ops->config_iface_filter(&local->hw, &sdata->vif, filter_flags, changed_flags); trace_drv_return_void(local); } static inline int drv_set_tim(struct ieee80211_local *local, struct ieee80211_sta *sta, bool set) { int ret = 0; trace_drv_set_tim(local, sta, set); if (local->ops->set_tim) ret = local->ops->set_tim(&local->hw, sta, set); trace_drv_return_int(local, ret); return ret; } int drv_set_key(struct ieee80211_local *local, enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); static inline void drv_update_tkip_key(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_key_conf *conf, struct sta_info *sta, u32 iv32, u16 *phase1key) { 
struct ieee80211_sta *ista = NULL; if (sta) ista = &sta->sta; sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); if (local->ops->update_tkip_key) local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, ista, iv32, phase1key); trace_drv_return_void(local); } static inline int drv_hw_scan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_scan_request *req) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_hw_scan(local, sdata); ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); trace_drv_return_int(local, ret); return ret; } static inline void drv_cancel_hw_scan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; trace_drv_cancel_hw_scan(local, sdata); local->ops->cancel_hw_scan(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline int drv_sched_scan_start(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_sched_scan_start(local, sdata); ret = local->ops->sched_scan_start(&local->hw, &sdata->vif, req, ies); trace_drv_return_int(local, ret); return ret; } static inline int drv_sched_scan_stop(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_sched_scan_stop(local, sdata); ret = local->ops->sched_scan_stop(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); return ret; } static inline void drv_sw_scan_start(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const u8 *mac_addr) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_sw_scan_start(local, sdata, mac_addr); if (local->ops->sw_scan_start) local->ops->sw_scan_start(&local->hw, &sdata->vif, mac_addr); trace_drv_return_void(local); } static inline void drv_sw_scan_complete(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_sw_scan_complete(local, sdata); if (local->ops->sw_scan_complete) local->ops->sw_scan_complete(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline int drv_get_stats(struct ieee80211_local *local, struct ieee80211_low_level_stats *stats) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (local->ops->get_stats) ret = local->ops->get_stats(&local->hw, stats); trace_drv_get_stats(local, stats, ret); return ret; } static inline void drv_get_key_seq(struct ieee80211_local *local, struct ieee80211_key *key, struct ieee80211_key_seq *seq) { if (local->ops->get_key_seq) local->ops->get_key_seq(&local->hw, &key->conf, seq); trace_drv_get_key_seq(local, &key->conf); } static inline int drv_set_frag_threshold(struct ieee80211_local *local, u32 value) { int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_set_frag_threshold(local, value); if (local->ops->set_frag_threshold) ret = local->ops->set_frag_threshold(&local->hw, value); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_rts_threshold(struct ieee80211_local *local, u32 value) { int ret 
= 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_set_rts_threshold(local, value); if (local->ops->set_rts_threshold) ret = local->ops->set_rts_threshold(&local->hw, value); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_coverage_class(struct ieee80211_local *local, s16 value) { int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_set_coverage_class(local, value); if (local->ops->set_coverage_class) local->ops->set_coverage_class(&local->hw, value); else ret = -EOPNOTSUPP; trace_drv_return_int(local, ret); return ret; } static inline void drv_sta_notify(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_notify(local, sdata, cmd, sta); if (local->ops->sta_notify) local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); trace_drv_return_void(local); } static inline int drv_sta_add(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta) { int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_sta_add(local, sdata, sta); if (local->ops->sta_add) ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); trace_drv_return_int(local, ret); return ret; } static inline void drv_sta_remove(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_remove(local, sdata, sta); if (local->ops->sta_remove) local->ops->sta_remove(&local->hw, &sdata->vif, sta); trace_drv_return_void(local); } #ifdef CONFIG_MAC80211_DEBUGFS static inline void drv_vif_add_debugfs(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); if (sdata->vif.type == NL80211_IFTYPE_MONITOR || WARN_ON(!sdata->vif.debugfs_dir)) return; sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; if (local->ops->vif_add_debugfs) local->ops->vif_add_debugfs(&local->hw, &sdata->vif); } static inline void drv_link_add_debugfs(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *link_conf, struct dentry *dir) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; if (local->ops->link_add_debugfs) local->ops->link_add_debugfs(&local->hw, &sdata->vif, link_conf, dir); } static inline void drv_sta_add_debugfs(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct dentry *dir) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; if (local->ops->sta_add_debugfs) local->ops->sta_add_debugfs(&local->hw, &sdata->vif, sta, dir); } static inline void drv_link_sta_add_debugfs(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_link_sta *link_sta, struct dentry *dir) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; if (local->ops->link_sta_add_debugfs) local->ops->link_sta_add_debugfs(&local->hw, &sdata->vif, link_sta, dir); } #else static inline void drv_vif_add_debugfs(struct ieee80211_local *local, struct ieee80211_sub_if_data 
*sdata) { might_sleep(); } #endif static inline void drv_sta_pre_rcu_remove(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_pre_rcu_remove(local, sdata, &sta->sta); if (local->ops->sta_pre_rcu_remove) local->ops->sta_pre_rcu_remove(&local->hw, &sdata->vif, &sta->sta); trace_drv_return_void(local); } __must_check int drv_sta_state(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state); __must_check int drv_sta_set_txpwr(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta); void drv_link_sta_rc_update(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_link_sta *link_sta, u32 changed); static inline void drv_sta_rate_tbl_update(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta) { sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_rate_tbl_update(local, sdata, sta); if (local->ops->sta_rate_tbl_update) local->ops->sta_rate_tbl_update(&local->hw, &sdata->vif, sta); trace_drv_return_void(local); } static inline void drv_sta_statistics(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct station_info *sinfo) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_statistics(local, sdata, sta); if (local->ops->sta_statistics) local->ops->sta_statistics(&local->hw, &sdata->vif, sta, sinfo); trace_drv_return_void(local); } int drv_conf_tx(struct ieee80211_local *local, struct ieee80211_link_data *link, u16 ac, const struct ieee80211_tx_queue_params *params); u64 drv_get_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); void drv_set_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u64 tsf); void drv_offset_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, s64 offset); void drv_reset_tsf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); static inline int drv_tx_last_beacon(struct ieee80211_local *local) { int ret = 0; /* default unsupported op for less congestion */ might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_tx_last_beacon(local); if (local->ops->tx_last_beacon) ret = local->ops->tx_last_beacon(&local->hw); trace_drv_return_int(local, ret); return ret; } int drv_ampdu_action(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_ampdu_params *params); static inline int drv_get_survey(struct ieee80211_local *local, int idx, struct survey_info *survey) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_get_survey(local, idx, survey); if (local->ops->get_survey) ret = local->ops->get_survey(&local->hw, idx, survey); trace_drv_return_int(local, ret); return ret; } static inline void drv_rfkill_poll(struct ieee80211_local *local) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (local->ops->rfkill_poll) local->ops->rfkill_poll(&local->hw); } static inline void drv_flush(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u32 queues, bool drop) { struct ieee80211_vif *vif; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = 
get_bss_sdata(sdata); vif = sdata ? &sdata->vif : NULL; if (sdata && !check_sdata_in_driver(sdata)) return; trace_drv_flush(local, queues, drop); if (local->ops->flush) local->ops->flush(&local->hw, vif, queues, drop); trace_drv_return_void(local); } static inline void drv_flush_sta(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct sta_info *sta) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); sdata = get_bss_sdata(sdata); if (sdata && !check_sdata_in_driver(sdata)) return; trace_drv_flush_sta(local, sdata, &sta->sta); if (local->ops->flush_sta) local->ops->flush_sta(&local->hw, &sdata->vif, &sta->sta); trace_drv_return_void(local); } static inline void drv_channel_switch(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_channel_switch(local, sdata, ch_switch); local->ops->channel_switch(&local->hw, &sdata->vif, ch_switch); trace_drv_return_void(local); } static inline int drv_set_antenna(struct ieee80211_local *local, u32 tx_ant, u32 rx_ant) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (local->ops->set_antenna) ret = local->ops->set_antenna(&local->hw, tx_ant, rx_ant); trace_drv_set_antenna(local, tx_ant, rx_ant, ret); return ret; } static inline int drv_get_antenna(struct ieee80211_local *local, u32 *tx_ant, u32 *rx_ant) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (local->ops->get_antenna) ret = local->ops->get_antenna(&local->hw, tx_ant, rx_ant); trace_drv_get_antenna(local, *tx_ant, *rx_ant, ret); return ret; } static inline int drv_remain_on_channel(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *chan, unsigned int duration, enum ieee80211_roc_type type) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_remain_on_channel(local, sdata, chan, duration, type); ret = local->ops->remain_on_channel(&local->hw, &sdata->vif, chan, duration, type); trace_drv_return_int(local, ret); return ret; } static inline int drv_cancel_remain_on_channel(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_cancel_remain_on_channel(local, sdata); ret = local->ops->cancel_remain_on_channel(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); return ret; } static inline int drv_set_ringparam(struct ieee80211_local *local, u32 tx, u32 rx) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_set_ringparam(local, tx, rx); if (local->ops->set_ringparam) ret = local->ops->set_ringparam(&local->hw, tx, rx); trace_drv_return_int(local, ret); return ret; } static inline void drv_get_ringparam(struct ieee80211_local *local, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_get_ringparam(local, tx, tx_max, rx, rx_max); if (local->ops->get_ringparam) local->ops->get_ringparam(&local->hw, tx, tx_max, rx, rx_max); trace_drv_return_void(local); } static inline bool drv_tx_frames_pending(struct ieee80211_local *local) { bool ret = false; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_tx_frames_pending(local); if (local->ops->tx_frames_pending) ret = local->ops->tx_frames_pending(&local->hw); trace_drv_return_bool(local, ret); return ret; } static inline int drv_set_bitrate_mask(struct ieee80211_local *local, struct 
ieee80211_sub_if_data *sdata, const struct cfg80211_bitrate_mask *mask) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_set_bitrate_mask(local, sdata, mask); if (local->ops->set_bitrate_mask) ret = local->ops->set_bitrate_mask(&local->hw, &sdata->vif, mask); trace_drv_return_int(local, ret); return ret; } static inline void drv_set_rekey_data(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_gtk_rekey_data *data) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; trace_drv_set_rekey_data(local, sdata, data); if (local->ops->set_rekey_data) local->ops->set_rekey_data(&local->hw, &sdata->vif, data); trace_drv_return_void(local); } static inline void drv_event_callback(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const struct ieee80211_event *event) { trace_drv_event_callback(local, sdata, event); if (local->ops->event_callback) local->ops->event_callback(&local->hw, &sdata->vif, event); trace_drv_return_void(local); } static inline void drv_release_buffered_frames(struct ieee80211_local *local, struct sta_info *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { trace_drv_release_buffered_frames(local, &sta->sta, tids, num_frames, reason, more_data); if (local->ops->release_buffered_frames) local->ops->release_buffered_frames(&local->hw, &sta->sta, tids, num_frames, reason, more_data); trace_drv_return_void(local); } static inline void drv_allow_buffered_frames(struct ieee80211_local *local, struct sta_info *sta, u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data) { trace_drv_allow_buffered_frames(local, &sta->sta, tids, num_frames, reason, more_data); if (local->ops->allow_buffered_frames) local->ops->allow_buffered_frames(&local->hw, &sta->sta, tids, num_frames, reason, more_data); trace_drv_return_void(local); } static inline void drv_mgd_prepare_tx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_prep_tx_info *info) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); info->link_id = info->link_id < 0 ? 0 : info->link_id; trace_drv_mgd_prepare_tx(local, sdata, info->duration, info->subtype, info->success); if (local->ops->mgd_prepare_tx) local->ops->mgd_prepare_tx(&local->hw, &sdata->vif, info); trace_drv_return_void(local); } static inline void drv_mgd_complete_tx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_prep_tx_info *info) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); trace_drv_mgd_complete_tx(local, sdata, info->duration, info->subtype, info->success); if (local->ops->mgd_complete_tx) local->ops->mgd_complete_tx(&local->hw, &sdata->vif, info); trace_drv_return_void(local); } static inline void drv_mgd_protect_tdls_discover(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int link_id) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION); link_id = link_id > 0 ? 
link_id : 0; trace_drv_mgd_protect_tdls_discover(local, sdata); if (local->ops->mgd_protect_tdls_discover) local->ops->mgd_protect_tdls_discover(&local->hw, &sdata->vif, link_id); trace_drv_return_void(local); } static inline int drv_add_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_add_chanctx(local, ctx); if (local->ops->add_chanctx) ret = local->ops->add_chanctx(&local->hw, &ctx->conf); trace_drv_return_int(local, ret); if (!ret) ctx->driver_present = true; return ret; } static inline void drv_remove_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (WARN_ON(!ctx->driver_present)) return; trace_drv_remove_chanctx(local, ctx); if (local->ops->remove_chanctx) local->ops->remove_chanctx(&local->hw, &ctx->conf); trace_drv_return_void(local); ctx->driver_present = false; } static inline void drv_change_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, u32 changed) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_change_chanctx(local, ctx, changed); if (local->ops->change_chanctx) { WARN_ON_ONCE(!ctx->driver_present); local->ops->change_chanctx(&local->hw, &ctx->conf, changed); } trace_drv_return_void(local); } int drv_assign_vif_chanctx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx *ctx); void drv_unassign_vif_chanctx(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx *ctx); int drv_switch_vif_chanctx(struct ieee80211_local *local, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs, enum ieee80211_chanctx_switch_mode mode); static inline int drv_start_ap(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *link_conf) { int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_start_ap(local, sdata, link_conf); if (local->ops->start_ap) ret = local->ops->start_ap(&local->hw, &sdata->vif, link_conf); trace_drv_return_int(local, ret); return ret; } static inline void drv_stop_ap(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_bss_conf *link_conf) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; trace_drv_stop_ap(local, sdata, link_conf); if (local->ops->stop_ap) local->ops->stop_ap(&local->hw, &sdata->vif, link_conf); trace_drv_return_void(local); } static inline void drv_reconfig_complete(struct ieee80211_local *local, enum ieee80211_reconfig_type reconfig_type) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); trace_drv_reconfig_complete(local, reconfig_type); if (local->ops->reconfig_complete) local->ops->reconfig_complete(&local->hw, reconfig_type); trace_drv_return_void(local); } static inline void drv_set_default_unicast_key(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int key_idx) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; WARN_ON_ONCE(key_idx < -1 || key_idx > 3); trace_drv_set_default_unicast_key(local, sdata, key_idx); if (local->ops->set_default_unicast_key) local->ops->set_default_unicast_key(&local->hw, &sdata->vif, key_idx); trace_drv_return_void(local); } #if IS_ENABLED(CONFIG_IPV6) static inline void 
drv_ipv6_addr_change(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct inet6_dev *idev) { trace_drv_ipv6_addr_change(local, sdata); if (local->ops->ipv6_addr_change) local->ops->ipv6_addr_change(&local->hw, &sdata->vif, idev); trace_drv_return_void(local); } #endif static inline void drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata, struct cfg80211_chan_def *chandef) { struct ieee80211_local *local = sdata->local; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (local->ops->channel_switch_beacon) { trace_drv_channel_switch_beacon(local, sdata, chandef); local->ops->channel_switch_beacon(&local->hw, &sdata->vif, chandef); } } static inline int drv_pre_channel_switch(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch) { struct ieee80211_local *local = sdata->local; int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; if (!ieee80211_vif_link_active(&sdata->vif, ch_switch->link_id)) return 0; trace_drv_pre_channel_switch(local, sdata, ch_switch); if (local->ops->pre_channel_switch) ret = local->ops->pre_channel_switch(&local->hw, &sdata->vif, ch_switch); trace_drv_return_int(local, ret); return ret; } static inline int drv_post_channel_switch(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) return 0; trace_drv_post_channel_switch(local, sdata); if (local->ops->post_channel_switch) ret = local->ops->post_channel_switch(&local->hw, &sdata->vif, link->conf); trace_drv_return_int(local, ret); return ret; } static inline void drv_abort_channel_switch(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) return; trace_drv_abort_channel_switch(local, sdata); if (local->ops->abort_channel_switch) local->ops->abort_channel_switch(&local->hw, &sdata->vif, link->conf); } static inline void drv_channel_switch_rx_beacon(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel_switch *ch_switch) { struct ieee80211_local *local = sdata->local; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; if (!ieee80211_vif_link_active(&sdata->vif, ch_switch->link_id)) return; trace_drv_channel_switch_rx_beacon(local, sdata, ch_switch); if (local->ops->channel_switch_rx_beacon) local->ops->channel_switch_rx_beacon(&local->hw, &sdata->vif, ch_switch); } static inline int drv_join_ibss(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { int ret = 0; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_join_ibss(local, sdata, &sdata->vif.bss_conf); if (local->ops->join_ibss) ret = local->ops->join_ibss(&local->hw, &sdata->vif); trace_drv_return_int(local, ret); return ret; } static inline void drv_leave_ibss(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; trace_drv_leave_ibss(local, sdata); if (local->ops->leave_ibss) local->ops->leave_ibss(&local->hw, 
&sdata->vif); trace_drv_return_void(local); } static inline u32 drv_get_expected_throughput(struct ieee80211_local *local, struct sta_info *sta) { u32 ret = 0; trace_drv_get_expected_throughput(&sta->sta); if (local->ops->get_expected_throughput && sta->uploaded) ret = local->ops->get_expected_throughput(&local->hw, &sta->sta); trace_drv_return_u32(local, ret); return ret; } static inline int drv_get_txpower(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, int *dbm) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!local->ops->get_txpower) return -EOPNOTSUPP; ret = local->ops->get_txpower(&local->hw, &sdata->vif, dbm); trace_drv_get_txpower(local, sdata, *dbm, ret); return ret; } static inline int drv_tdls_channel_switch(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u8 oper_class, struct cfg80211_chan_def *chandef, struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; if (!local->ops->tdls_channel_switch) return -EOPNOTSUPP; trace_drv_tdls_channel_switch(local, sdata, sta, oper_class, chandef); ret = local->ops->tdls_channel_switch(&local->hw, &sdata->vif, sta, oper_class, chandef, tmpl_skb, ch_sw_tm_ie); trace_drv_return_int(local, ret); return ret; } static inline void drv_tdls_cancel_channel_switch(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; if (!local->ops->tdls_cancel_channel_switch) return; trace_drv_tdls_cancel_channel_switch(local, sdata, sta); local->ops->tdls_cancel_channel_switch(&local->hw, &sdata->vif, sta); trace_drv_return_void(local); } static inline void drv_tdls_recv_channel_switch(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_tdls_ch_sw_params *params) { trace_drv_tdls_recv_channel_switch(local, sdata, params); if (local->ops->tdls_recv_channel_switch) local->ops->tdls_recv_channel_switch(&local->hw, &sdata->vif, params); trace_drv_return_void(local); } static inline void drv_wake_tx_queue(struct ieee80211_local *local, struct txq_info *txq) { struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif); /* In reconfig don't transmit now, but mark for waking later */ if (local->in_reconfig) { set_bit(IEEE80211_TXQ_DIRTY, &txq->flags); return; } if (!check_sdata_in_driver(sdata)) return; trace_drv_wake_tx_queue(local, sdata, txq); local->ops->wake_tx_queue(&local->hw, &txq->txq); } static inline void schedule_and_wake_txq(struct ieee80211_local *local, struct txq_info *txqi) { ieee80211_schedule_txq(&local->hw, &txqi->txq); drv_wake_tx_queue(local, txqi); } static inline int drv_can_aggregate_in_amsdu(struct ieee80211_local *local, struct sk_buff *head, struct sk_buff *skb) { if (!local->ops->can_aggregate_in_amsdu) return true; return local->ops->can_aggregate_in_amsdu(&local->hw, head, skb); } static inline int drv_get_ftm_responder_stats(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_ftm_responder_stats *ftm_stats) { u32 ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; if (local->ops->get_ftm_responder_stats) ret = local->ops->get_ftm_responder_stats(&local->hw, &sdata->vif, ftm_stats); trace_drv_get_ftm_responder_stats(local, sdata, ftm_stats); return ret; } static inline int 
drv_start_pmsr(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_pmsr_request *request) { int ret = -EOPNOTSUPP; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_start_pmsr(local, sdata); if (local->ops->start_pmsr) ret = local->ops->start_pmsr(&local->hw, &sdata->vif, request); trace_drv_return_int(local, ret); return ret; } static inline void drv_abort_pmsr(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_pmsr_request *request) { trace_drv_abort_pmsr(local, sdata); might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; if (local->ops->abort_pmsr) local->ops->abort_pmsr(&local->hw, &sdata->vif, request); trace_drv_return_void(local); } static inline int drv_start_nan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_nan_conf *conf) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); check_sdata_in_driver(sdata); trace_drv_start_nan(local, sdata, conf); ret = local->ops->start_nan(&local->hw, &sdata->vif, conf); trace_drv_return_int(local, ret); return ret; } static inline void drv_stop_nan(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); check_sdata_in_driver(sdata); trace_drv_stop_nan(local, sdata); local->ops->stop_nan(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline int drv_nan_change_conf(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct cfg80211_nan_conf *conf, u32 changes) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); check_sdata_in_driver(sdata); if (!local->ops->nan_change_conf) return -EOPNOTSUPP; trace_drv_nan_change_conf(local, sdata, conf, changes); ret = local->ops->nan_change_conf(&local->hw, &sdata->vif, conf, changes); trace_drv_return_int(local, ret); return ret; } static inline int drv_add_nan_func(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, const struct cfg80211_nan_func *nan_func) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); check_sdata_in_driver(sdata); if (!local->ops->add_nan_func) return -EOPNOTSUPP; trace_drv_add_nan_func(local, sdata, nan_func); ret = local->ops->add_nan_func(&local->hw, &sdata->vif, nan_func); trace_drv_return_int(local, ret); return ret; } static inline void drv_del_nan_func(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u8 instance_id) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); check_sdata_in_driver(sdata); trace_drv_del_nan_func(local, sdata, instance_id); if (local->ops->del_nan_func) local->ops->del_nan_func(&local->hw, &sdata->vif, instance_id); trace_drv_return_void(local); } static inline int drv_set_tid_config(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct cfg80211_tid_config *tid_conf) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); ret = local->ops->set_tid_config(&local->hw, &sdata->vif, sta, tid_conf); trace_drv_return_int(local, ret); return ret; } static inline int drv_reset_tid_config(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u8 tids) { int ret; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); ret = local->ops->reset_tid_config(&local->hw, &sdata->vif, sta, tids); trace_drv_return_int(local, ret); return ret; } static inline void drv_update_vif_offload(struct 
ieee80211_local *local, struct ieee80211_sub_if_data *sdata) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); check_sdata_in_driver(sdata); if (!local->ops->update_vif_offload) return; trace_drv_update_vif_offload(local, sdata); local->ops->update_vif_offload(&local->hw, &sdata->vif); trace_drv_return_void(local); } static inline void drv_sta_set_4addr(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, bool enabled) { sdata = get_bss_sdata(sdata); might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_set_4addr(local, sdata, sta, enabled); if (local->ops->sta_set_4addr) local->ops->sta_set_4addr(&local->hw, &sdata->vif, sta, enabled); trace_drv_return_void(local); } static inline void drv_sta_set_decap_offload(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, bool enabled) { sdata = get_bss_sdata(sdata); might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; trace_drv_sta_set_decap_offload(local, sdata, sta, enabled); if (local->ops->sta_set_decap_offload) local->ops->sta_set_decap_offload(&local->hw, &sdata->vif, sta, enabled); trace_drv_return_void(local); } static inline void drv_add_twt_setup(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct ieee80211_twt_setup *twt) { struct ieee80211_twt_params *twt_agrt; might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; twt_agrt = (void *)twt->params; trace_drv_add_twt_setup(local, sta, twt, twt_agrt); local->ops->add_twt_setup(&local->hw, sta, twt); trace_drv_return_void(local); } static inline void drv_twt_teardown_request(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u8 flowid) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return; if (!local->ops->twt_teardown_request) return; trace_drv_twt_teardown_request(local, sta, flowid); local->ops->twt_teardown_request(&local->hw, sta, flowid); trace_drv_return_void(local); } static inline int drv_net_fill_forward_path(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, struct net_device_path_ctx *ctx, struct net_device_path *path) { int ret = -EOPNOTSUPP; sdata = get_bss_sdata(sdata); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_net_fill_forward_path(local, sdata, sta); if (local->ops->net_fill_forward_path) ret = local->ops->net_fill_forward_path(&local->hw, &sdata->vif, sta, ctx, path); trace_drv_return_int(local, ret); return ret; } static inline int drv_net_setup_tc(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct net_device *dev, enum tc_setup_type type, void *type_data) { int ret = -EOPNOTSUPP; might_sleep(); sdata = get_bss_sdata(sdata); trace_drv_net_setup_tc(local, sdata, type); if (local->ops->net_setup_tc) ret = local->ops->net_setup_tc(&local->hw, &sdata->vif, dev, type, type_data); trace_drv_return_int(local, ret); return ret; } static inline bool drv_can_activate_links(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 active_links) { bool ret = true; lockdep_assert_wiphy(local->hw.wiphy); if (!check_sdata_in_driver(sdata)) return false; trace_drv_can_activate_links(local, sdata, active_links); if (local->ops->can_activate_links) ret = local->ops->can_activate_links(&local->hw, &sdata->vif, active_links); 
trace_drv_return_bool(local, ret); return ret; } int drv_change_vif_links(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, u16 old_links, u16 new_links, struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]); int drv_change_sta_links(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_sta *sta, u16 old_links, u16 new_links); static inline enum ieee80211_neg_ttlm_res drv_can_neg_ttlm(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata, struct ieee80211_neg_ttlm *neg_ttlm) { enum ieee80211_neg_ttlm_res res = NEG_TTLM_RES_REJECT; might_sleep(); if (!check_sdata_in_driver(sdata)) return -EIO; trace_drv_can_neg_ttlm(local, sdata, neg_ttlm); if (local->ops->can_neg_ttlm) res = local->ops->can_neg_ttlm(&local->hw, &sdata->vif, neg_ttlm); trace_drv_neg_ttlm_res(local, sdata, res, neg_ttlm); return res; } static inline void drv_prep_add_interface(struct ieee80211_local *local, enum nl80211_iftype type) { trace_drv_prep_add_interface(local, type); if (local->ops->prep_add_interface) local->ops->prep_add_interface(&local->hw, type); trace_drv_return_void(local); } #endif /* __MAC80211_DRIVER_OPS */
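The drv_*() helpers above all follow one shape: assert the locking/sleeping context, bail out if the interface is not known to the driver, emit a trace event, invoke the driver callback only when it is implemented, then trace the return value. Below is a minimal self-contained userspace sketch of that optional-callback pattern; struct ops, drv_do_thing and real_do_thing are hypothetical names (not mac80211 symbols), and the printf() calls merely stand in for the trace_drv_*() events.

#include <stdio.h>
#include <errno.h>

/* Hypothetical driver ops table; any callback may be left NULL. */
struct ops {
	int (*do_thing)(int arg);
};

/* Sketch of the wrapper pattern: trace entry, fall back to a default
 * error when the driver does not implement the hook, trace the result. */
static int drv_do_thing(const struct ops *ops, int arg)
{
	int ret = -EOPNOTSUPP;	/* default when the callback is absent */

	printf("trace: drv_do_thing(arg=%d)\n", arg);
	if (ops->do_thing)
		ret = ops->do_thing(arg);
	printf("trace: drv_return_int(%d)\n", ret);
	return ret;
}

static int real_do_thing(int arg)
{
	return arg * 2;
}

int main(void)
{
	struct ops with = { .do_thing = real_do_thing };
	struct ops without = { 0 };

	printf("with callback:    %d\n", drv_do_thing(&with, 21));
	printf("without callback: %d\n", drv_do_thing(&without, 21));
	return 0;
}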
// SPDX-License-Identifier: GPL-2.0-only #ifndef KVM_X86_MMU_SPTE_H #define KVM_X86_MMU_SPTE_H #include <asm/vmx.h> #include "mmu.h" #include "mmu_internal.h" /* * A MMU present SPTE is backed by actual memory and may or may not be present * in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it * is ignored by all flavors of SPTEs and checking a low bit often generates * better code than for a high bit, e.g. 56+. MMU present checks are pervasive * enough that the improved code generation is noticeable in KVM's footprint. */ #define SPTE_MMU_PRESENT_MASK BIT_ULL(11) /* * TDP SPTEs (more specifically, EPT SPTEs) may not have A/D bits, and may also * be restricted to using write-protection (for L2 when CPU dirty logging, i.e. * PML, is enabled). Use bits 52 and 53 to hold the type of A/D tracking that * must be employed for a given TDP SPTE. * * Note, the "enabled" mask must be '0', as bits 62:52 are _reserved_ for PAE * paging, including NPT PAE. This scheme works because legacy shadow paging * is guaranteed to have A/D bits and write-protection is forced only for * TDP with CPU dirty logging (PML). If NPT ever gains PML-like support, it * must be restricted to 64-bit KVM.
*/ #define SPTE_TDP_AD_SHIFT 52 #define SPTE_TDP_AD_MASK (3ULL << SPTE_TDP_AD_SHIFT) #define SPTE_TDP_AD_ENABLED (0ULL << SPTE_TDP_AD_SHIFT) #define SPTE_TDP_AD_DISABLED (1ULL << SPTE_TDP_AD_SHIFT) #define SPTE_TDP_AD_WRPROT_ONLY (2ULL << SPTE_TDP_AD_SHIFT) static_assert(SPTE_TDP_AD_ENABLED == 0); #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK #define SPTE_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1)) #else #define SPTE_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)) #endif #define SPTE_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \ | shadow_x_mask | shadow_nx_mask | shadow_me_mask) #define ACC_EXEC_MASK 1 #define ACC_WRITE_MASK PT_WRITABLE_MASK #define ACC_USER_MASK PT_USER_MASK #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK) /* The mask for the R/X bits in EPT PTEs */ #define SPTE_EPT_READABLE_MASK 0x1ull #define SPTE_EPT_EXECUTABLE_MASK 0x4ull #define SPTE_LEVEL_BITS 9 #define SPTE_LEVEL_SHIFT(level) __PT_LEVEL_SHIFT(level, SPTE_LEVEL_BITS) #define SPTE_INDEX(address, level) __PT_INDEX(address, level, SPTE_LEVEL_BITS) #define SPTE_ENT_PER_PAGE __PT_ENT_PER_PAGE(SPTE_LEVEL_BITS) /* * The mask/shift to use for saving the original R/X bits when marking the PTE * as not-present for access tracking purposes. We do not save the W bit as the * PTEs being access tracked also need to be dirty tracked, so the W bit will be * restored only when a write is attempted to the page. This mask obviously * must not overlap the A/D type mask. */ #define SHADOW_ACC_TRACK_SAVED_BITS_MASK (SPTE_EPT_READABLE_MASK | \ SPTE_EPT_EXECUTABLE_MASK) #define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT 54 #define SHADOW_ACC_TRACK_SAVED_MASK (SHADOW_ACC_TRACK_SAVED_BITS_MASK << \ SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK)); /* * {DEFAULT,EPT}_SPTE_{HOST,MMU}_WRITABLE are used to keep track of why a given * SPTE is write-protected. See is_writable_pte() for details. */ /* Bits 9 and 10 are ignored by all non-EPT PTEs. */ #define DEFAULT_SPTE_HOST_WRITABLE BIT_ULL(9) #define DEFAULT_SPTE_MMU_WRITABLE BIT_ULL(10) /* * Low ignored bits are at a premium for EPT, use high ignored bits, taking care * to not overlap the A/D type mask or the saved access bits of access-tracked * SPTEs when A/D bits are disabled. */ #define EPT_SPTE_HOST_WRITABLE BIT_ULL(57) #define EPT_SPTE_MMU_WRITABLE BIT_ULL(58) static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK)); static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK)); static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK)); static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK)); /* Defined only to keep the above static asserts readable. */ #undef SHADOW_ACC_TRACK_SAVED_MASK /* * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of * the memslots generation and is derived as follows: * * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10 * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62 * * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in * the MMIO generation number, as doing so would require stealing a bit from * the "real" generation number and thus effectively halve the maximum number * of MMIO generations that can be handled before encountering a wrap (which * requires a full MMU zap). The flag is instead explicitly queried when * checking for MMIO spte cache hits. 
*/ #define MMIO_SPTE_GEN_LOW_START 3 #define MMIO_SPTE_GEN_LOW_END 10 #define MMIO_SPTE_GEN_HIGH_START 52 #define MMIO_SPTE_GEN_HIGH_END 62 #define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \ MMIO_SPTE_GEN_LOW_START) #define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \ MMIO_SPTE_GEN_HIGH_START) static_assert(!(SPTE_MMU_PRESENT_MASK & (MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK))); /* * The SPTE MMIO mask must NOT overlap the MMIO generation bits or the * MMU-present bit. The generation obviously co-exists with the magic MMIO * mask/value, and MMIO SPTEs are considered !MMU-present. * * The SPTE MMIO mask is allowed to use hardware "present" bits (i.e. all EPT * RWX bits), all physical address bits (legal PA bits are used for "fast" MMIO * and so they're off-limits for generation; additional checks ensure the mask * doesn't overlap legal PA bits), and bit 63 (carved out for future usage). */ #define SPTE_MMIO_ALLOWED_MASK (BIT_ULL(63) | GENMASK_ULL(51, 12) | GENMASK_ULL(2, 0)) static_assert(!(SPTE_MMIO_ALLOWED_MASK & (SPTE_MMU_PRESENT_MASK | MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK))); #define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1) #define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1) /* remember to adjust the comment above as well if you change these */ static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11); #define MMIO_SPTE_GEN_LOW_SHIFT (MMIO_SPTE_GEN_LOW_START - 0) #define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS) #define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0) /* * Non-present SPTE value needs to set bit 63 for TDX, in order to suppress * #VE and get EPT violations on non-present PTEs. We can use the * same value also without TDX for both VMX and SVM: * * For SVM NPT, for non-present spte (bit 0 = 0), other bits are ignored. * For VMX EPT, bit 63 is ignored if #VE is disabled. (EPT_VIOLATION_VE=0) * bit 63 is #VE suppress if #VE is enabled. (EPT_VIOLATION_VE=1) */ #ifdef CONFIG_X86_64 #define SHADOW_NONPRESENT_VALUE BIT_ULL(63) static_assert(!(SHADOW_NONPRESENT_VALUE & SPTE_MMU_PRESENT_MASK)); #else #define SHADOW_NONPRESENT_VALUE 0ULL #endif /* * True if A/D bits are supported in hardware and are enabled by KVM. When * enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can disable * A/D bits in EPTP12, SP and SPTE variants are needed to handle the scenario * where KVM is using A/D bits for L1, but not L2. */ extern bool __read_mostly kvm_ad_enabled; extern u64 __read_mostly shadow_host_writable_mask; extern u64 __read_mostly shadow_mmu_writable_mask; extern u64 __read_mostly shadow_nx_mask; extern u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */ extern u64 __read_mostly shadow_user_mask; extern u64 __read_mostly shadow_accessed_mask; extern u64 __read_mostly shadow_dirty_mask; extern u64 __read_mostly shadow_mmio_value; extern u64 __read_mostly shadow_mmio_mask; extern u64 __read_mostly shadow_mmio_access_mask; extern u64 __read_mostly shadow_present_mask; extern u64 __read_mostly shadow_memtype_mask; extern u64 __read_mostly shadow_me_value; extern u64 __read_mostly shadow_me_mask; /* * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED; * shadow_acc_track_mask is the set of bits to be cleared in non-accessed * pages. 
*/ extern u64 __read_mostly shadow_acc_track_mask; /* * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order * to guard against L1TF attacks. */ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask; /* * The number of high-order 1 bits to use in the mask above. */ #define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5 /* * If a thread running without exclusive control of the MMU lock must perform a * multi-part operation on an SPTE, it can set the SPTE to FROZEN_SPTE as a * non-present intermediate value. Other threads which encounter this value * should not modify the SPTE. * * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create a L1TF * vulnerability. * * Only used by the TDP MMU. */ #define FROZEN_SPTE (SHADOW_NONPRESENT_VALUE | 0x5a0ULL) /* Frozen SPTEs must not be misconstrued as shadow present PTEs. */ static_assert(!(FROZEN_SPTE & SPTE_MMU_PRESENT_MASK)); static inline bool is_frozen_spte(u64 spte) { return spte == FROZEN_SPTE; } /* Get an SPTE's index into its parent's page table (and the spt array). */ static inline int spte_index(u64 *sptep) { return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1); } /* * In some cases, we need to preserve the GFN of a non-present or reserved * SPTE when we usurp the upper five bits of the physical address space to * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask * left into the reserved bits, i.e. the GFN in the SPTE will be split into * high and low parts. This mask covers the lower bits of the GFN. */ extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask; static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page) { struct page *page = pfn_to_page((shadow_page) >> PAGE_SHIFT); return (struct kvm_mmu_page *)page_private(page); } static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte) { return to_shadow_page(spte & SPTE_BASE_ADDR_MASK); } static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) { return to_shadow_page(__pa(sptep)); } static inline struct kvm_mmu_page *root_to_sp(hpa_t root) { if (kvm_mmu_is_dummy_root(root)) return NULL; /* * The "root" may be a special root, e.g. a PAE entry, treat it as a * SPTE to ensure any non-PA bits are dropped. */ return spte_to_child_sp(root); } static inline bool is_mmio_spte(struct kvm *kvm, u64 spte) { return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value && likely(enable_mmio_caching); } static inline bool is_shadow_present_pte(u64 pte) { return !!(pte & SPTE_MMU_PRESENT_MASK); } static inline bool is_ept_ve_possible(u64 spte) { return (shadow_present_mask & VMX_EPT_SUPPRESS_VE_BIT) && !(spte & VMX_EPT_SUPPRESS_VE_BIT) && (spte & VMX_EPT_RWX_MASK) != VMX_EPT_MISCONFIG_WX_VALUE; } static inline bool sp_ad_disabled(struct kvm_mmu_page *sp) { return sp->role.ad_disabled; } static inline bool spte_ad_enabled(u64 spte) { KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED; } static inline bool spte_ad_need_write_protect(u64 spte) { KVM_MMU_WARN_ON(!is_shadow_present_pte(spte)); /* * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED is '0', * and non-TDP SPTEs will never set these bits. Optimize for 64-bit * TDP and do the A/D type check unconditionally. 
*/ return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED; } static inline bool is_access_track_spte(u64 spte) { return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0; } static inline bool is_large_pte(u64 pte) { return pte & PT_PAGE_SIZE_MASK; } static inline bool is_last_spte(u64 pte, int level) { return (level == PG_LEVEL_4K) || is_large_pte(pte); } static inline bool is_executable_pte(u64 spte) { return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask; } static inline kvm_pfn_t spte_to_pfn(u64 pte) { return (pte & SPTE_BASE_ADDR_MASK) >> PAGE_SHIFT; } static inline bool is_accessed_spte(u64 spte) { return spte & shadow_accessed_mask; } static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) { int bit7 = (pte >> 7) & 1; return rsvd_check->rsvd_bits_mask[bit7][level-1]; } static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) { return pte & get_rsvd_bits(rsvd_check, pte, level); } static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte) { return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f); } static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, u64 spte, int level) { return __is_bad_mt_xwr(rsvd_check, spte) || __is_rsvd_bits_set(rsvd_check, spte, level); } /* * A shadow-present leaf SPTE may be non-writable for 4 possible reasons: * * 1. To intercept writes for dirty logging. KVM write-protects huge pages * so that they can be split down into the dirty logging * granularity (4KiB) whenever the guest writes to them. KVM also * write-protects 4KiB pages so that writes can be recorded in the dirty log * (e.g. if not using PML). SPTEs are write-protected for dirty logging * during the VM-iotcls that enable dirty logging. * * 2. To intercept writes to guest page tables that KVM is shadowing. When a * guest writes to its page table the corresponding shadow page table will * be marked "unsync". That way KVM knows which shadow page tables need to * be updated on the next TLB flush, INVLPG, etc. and which do not. * * 3. To prevent guest writes to read-only memory, such as for memory in a * read-only memslot or guest memory backed by a read-only VMA. Writes to * such pages are disallowed entirely. * * 4. To emulate the Accessed bit for SPTEs without A/D bits. Note, in this * case, the SPTE is access-protected, not just write-protected! * * For cases #1 and #4, KVM can safely make such SPTEs writable without taking * mmu_lock as capturing the Accessed/Dirty state doesn't require taking it. * To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits * in the SPTE: * * shadow_mmu_writable_mask, aka MMU-writable - * Cleared on SPTEs that KVM is currently write-protecting for shadow paging * purposes (case 2 above). * * shadow_host_writable_mask, aka Host-writable - * Cleared on SPTEs that are not host-writable (case 3 above) * * Note, not all possible combinations of PT_WRITABLE_MASK, * shadow_mmu_writable_mask, and shadow_host_writable_mask are valid. A given * SPTE can be in only one of the following states, which map to the * aforementioned 3 cases: * * shadow_host_writable_mask | shadow_mmu_writable_mask | PT_WRITABLE_MASK * ------------------------- | ------------------------ | ---------------- * 1 | 1 | 1 (writable) * 1 | 1 | 0 (case 1) * 1 | 0 | 0 (case 2) * 0 | 0 | 0 (case 3) * * The valid combinations of these bits are checked by * check_spte_writable_invariants() whenever an SPTE is modified. 
* * Clearing the MMU-writable bit is always done under the MMU lock and always * accompanied by a TLB flush before dropping the lock to avoid corrupting the * shadow page tables between vCPUs. Write-protecting an SPTE for dirty logging * (which does not clear the MMU-writable bit), does not flush TLBs before * dropping the lock, as it only needs to synchronize guest writes with the * dirty bitmap. Similarly, making the SPTE inaccessible (and non-writable) for * access-tracking via the clear_young() MMU notifier also does not flush TLBs. * * So, there is the problem: clearing the MMU-writable bit can encounter a * write-protected SPTE while CPUs still have writable mappings for that SPTE * cached in their TLB. To address this, KVM always flushes TLBs when * write-protecting SPTEs if the MMU-writable bit is set on the old SPTE. * * The Host-writable bit is not modified on present SPTEs, it is only set or * cleared when an SPTE is first faulted in from non-present and then remains * immutable. */ static inline bool is_writable_pte(unsigned long pte) { return pte & PT_WRITABLE_MASK; } /* Note: spte must be a shadow-present leaf SPTE. */ static inline void check_spte_writable_invariants(u64 spte) { if (spte & shadow_mmu_writable_mask) WARN_ONCE(!(spte & shadow_host_writable_mask), KBUILD_MODNAME ": MMU-writable SPTE is not Host-writable: %llx", spte); else WARN_ONCE(is_writable_pte(spte), KBUILD_MODNAME ": Writable SPTE is not MMU-writable: %llx", spte); } static inline bool is_mmu_writable_spte(u64 spte) { return spte & shadow_mmu_writable_mask; } /* * Returns true if the access indicated by @fault is allowed by the existing * SPTE protections. Note, the caller is responsible for checking that the * SPTE is a shadow-present, leaf SPTE (either before or after). */ static inline bool is_access_allowed(struct kvm_page_fault *fault, u64 spte) { if (fault->exec) return is_executable_pte(spte); if (fault->write) return is_writable_pte(spte); /* Fault was on Read access */ return spte & PT_PRESENT_MASK; } /* * If the MMU-writable flag is cleared, i.e. the SPTE is write-protected for * write-tracking, remote TLBs must be flushed, even if the SPTE was read-only, * as KVM allows stale Writable TLB entries to exist. When dirty logging, KVM * flushes TLBs based on whether or not dirty bitmap/ring entries were reaped, * not whether or not SPTEs were modified, i.e. only the write-tracking case * needs to flush at the time the SPTEs is modified, before dropping mmu_lock. * * Don't flush if the Accessed bit is cleared, as access tracking tolerates * false negatives, e.g. KVM x86 omits TLB flushes even when aging SPTEs for a * mmu_notifier.clear_flush_young() event. * * Lastly, don't flush if the Dirty bit is cleared, as KVM unconditionally * flushes when enabling dirty logging (see kvm_mmu_slot_apply_flags()), and * when clearing dirty logs, KVM flushes based on whether or not dirty entries * were reaped from the bitmap/ring, not whether or not dirty SPTEs were found. * * Note, this logic only applies to shadow-present leaf SPTEs. The caller is * responsible for checking that the old SPTE is shadow-present, and is also * responsible for determining whether or not a TLB flush is required when * modifying a shadow-present non-leaf SPTE. 
*/ static inline bool leaf_spte_change_needs_tlb_flush(u64 old_spte, u64 new_spte) { return is_mmu_writable_spte(old_spte) && !is_mmu_writable_spte(new_spte); } static inline u64 get_mmio_spte_generation(u64 spte) { u64 gen; gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT; gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT; return gen; } bool spte_has_volatile_bits(u64 spte); bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, const struct kvm_memory_slot *slot, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool prefetch, bool synchronizing, bool host_writable, u64 *new_spte); u64 make_small_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role, int index); u64 make_huge_spte(struct kvm *kvm, u64 small_spte, int level); u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled); u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access); u64 mark_spte_for_access_track(u64 spte); /* Restore an acc-track PTE back to a regular PTE */ static inline u64 restore_acc_track_spte(u64 spte) { u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT) & SHADOW_ACC_TRACK_SAVED_BITS_MASK; spte &= ~shadow_acc_track_mask; spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK << SHADOW_ACC_TRACK_SAVED_BITS_SHIFT); spte |= saved_bits; return spte; } void __init kvm_mmu_spte_module_init(void); void kvm_mmu_reset_all_pte_masks(void); #endif
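To make the MMIO-generation packing described above concrete (generation bits 0-7 live in SPTE bits 3-10, bits 8-18 in SPTE bits 52-62), here is a small self-contained userspace sketch. The mask and shift values mirror the header; encode_mmio_gen() is a hypothetical stand-in for the encoding done in spte.c (not shown here), while decode_mmio_gen() mirrors get_mmio_spte_generation().

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Userspace replacement for the kernel's GENMASK_ULL(). */
#define GENMASK_ULL(h, l)	(((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

#define MMIO_SPTE_GEN_LOW_START		3
#define MMIO_SPTE_GEN_LOW_END		10
#define MMIO_SPTE_GEN_HIGH_START	52
#define MMIO_SPTE_GEN_HIGH_END		62
#define MMIO_SPTE_GEN_LOW_MASK		GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK		GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, MMIO_SPTE_GEN_HIGH_START)
#define MMIO_SPTE_GEN_LOW_BITS		(MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_LOW_SHIFT		(MMIO_SPTE_GEN_LOW_START - 0)
#define MMIO_SPTE_GEN_HIGH_SHIFT	(MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

/* Hypothetical encoder: scatter a 19-bit generation into the two SPTE fields. */
static uint64_t encode_mmio_gen(uint64_t gen)
{
	return ((gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK) |
	       ((gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK);
}

/* Mirrors get_mmio_spte_generation() above. */
static uint64_t decode_mmio_gen(uint64_t spte)
{
	uint64_t gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;

	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
	return gen;
}

int main(void)
{
	uint64_t gen = 0x5a5a5 & ((1ULL << 19) - 1);	/* any 19-bit value */
	uint64_t spte = encode_mmio_gen(gen);

	assert(decode_mmio_gen(spte) == gen);	/* round trip is lossless */
	printf("gen %#llx -> spte gen bits %#llx\n",
	       (unsigned long long)gen, (unsigned long long)spte);
	return 0;
}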
// SPDX-License-Identifier: GPL-2.0 /* * fs/sysfs/mount.c - operations for initializing and mounting sysfs * * Copyright (c) 2001-3 Patrick Mochel * Copyright (c) 2007 SUSE Linux Products GmbH * Copyright (c) 2007 Tejun Heo <teheo@suse.de> * * Please see Documentation/filesystems/sysfs.rst for more information. */ #include <linux/fs.h> #include <linux/magic.h> #include <linux/mount.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/user_namespace.h> #include <linux/fs_context.h> #include <net/net_namespace.h> #include "sysfs.h" static struct kernfs_root *sysfs_root; struct kernfs_node *sysfs_root_kn; static int sysfs_get_tree(struct fs_context *fc) { struct kernfs_fs_context *kfc = fc->fs_private; int ret; ret = kernfs_get_tree(fc); if (ret) return ret; if (kfc->new_sb_created) fc->root->d_sb->s_iflags |= SB_I_USERNS_VISIBLE; return 0; } static void sysfs_fs_context_free(struct fs_context *fc) { struct kernfs_fs_context *kfc = fc->fs_private; if (kfc->ns_tag) kobj_ns_drop(KOBJ_NS_TYPE_NET, kfc->ns_tag); kernfs_free_fs_context(fc); kfree(kfc); } static const struct fs_context_operations sysfs_fs_context_ops = { .free = sysfs_fs_context_free, .get_tree = sysfs_get_tree, }; static int sysfs_init_fs_context(struct fs_context *fc) { struct kernfs_fs_context *kfc; struct net *netns; if (!(fc->sb_flags & SB_KERNMOUNT)) { if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET)) return -EPERM; } kfc = kzalloc(sizeof(struct kernfs_fs_context), GFP_KERNEL); if (!kfc) return -ENOMEM; kfc->ns_tag = netns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); kfc->root = sysfs_root; kfc->magic = SYSFS_MAGIC; fc->fs_private = kfc; fc->ops = &sysfs_fs_context_ops; if (netns) { put_user_ns(fc->user_ns); fc->user_ns = get_user_ns(netns->user_ns); } fc->global = true; return 0; } static void sysfs_kill_sb(struct super_block *sb) { void *ns = (void *)kernfs_super_ns(sb); kernfs_kill_sb(sb); kobj_ns_drop(KOBJ_NS_TYPE_NET, ns); } static struct file_system_type sysfs_fs_type = { .name = "sysfs", .init_fs_context = sysfs_init_fs_context, .kill_sb = sysfs_kill_sb, .fs_flags = FS_USERNS_MOUNT, }; int __init sysfs_init(void) { int err; sysfs_root = kernfs_create_root(NULL, KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, NULL); if (IS_ERR(sysfs_root)) return PTR_ERR(sysfs_root); sysfs_root_kn = kernfs_root_to_node(sysfs_root); err = register_filesystem(&sysfs_fs_type); if (err) { kernfs_destroy_root(sysfs_root); return err; } return 0; }
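For context, the fs_context code above is what services an ordinary sysfs mount request from userspace. A minimal sketch of such a request via mount(2), assuming the caller has CAP_SYS_ADMIN and that the target directory /mnt/sysfs already exists (both assumptions, not part of the code above):

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Equivalent to: mount -t sysfs sysfs /mnt/sysfs */
	if (mount("sysfs", "/mnt/sysfs", "sysfs", 0, NULL) != 0) {
		perror("mount sysfs");
		return 1;
	}
	puts("sysfs mounted at /mnt/sysfs");
	return 0;
}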
// SPDX-License-Identifier: LGPL-2.1-or-later /* * dmxdev.c - DVB demultiplexer device * * Copyright (C) 2000 Ralph Metzler & Marcus Metzler * for convergence integrated media GmbH */ #define pr_fmt(fmt) "dmxdev: " fmt #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/ioctl.h> #include <linux/wait.h> #include <linux/uaccess.h> #include <media/dmxdev.h> #include <media/dvb_vb2.h> static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); #define dprintk(fmt, arg...)
do { \ if (debug) \ printk(KERN_DEBUG pr_fmt("%s: " fmt), \ __func__, ##arg); \ } while (0) static int dvb_dmxdev_buffer_write(struct dvb_ringbuffer *buf, const u8 *src, size_t len) { ssize_t free; if (!len) return 0; if (!buf->data) return 0; free = dvb_ringbuffer_free(buf); if (len > free) { dprintk("buffer overflow\n"); return -EOVERFLOW; } return dvb_ringbuffer_write(buf, src, len); } static ssize_t dvb_dmxdev_buffer_read(struct dvb_ringbuffer *src, int non_blocking, char __user *buf, size_t count, loff_t *ppos) { size_t todo; ssize_t avail; ssize_t ret = 0; if (!src->data) return 0; if (src->error) { ret = src->error; dvb_ringbuffer_flush(src); return ret; } for (todo = count; todo > 0; todo -= ret) { if (non_blocking && dvb_ringbuffer_empty(src)) { ret = -EWOULDBLOCK; break; } ret = wait_event_interruptible(src->queue, !dvb_ringbuffer_empty(src) || (src->error != 0)); if (ret < 0) break; if (src->error) { ret = src->error; dvb_ringbuffer_flush(src); break; } avail = dvb_ringbuffer_avail(src); if (avail > todo) avail = todo; ret = dvb_ringbuffer_read_user(src, buf, avail); if (ret < 0) break; buf += ret; } return (count - todo) ? (count - todo) : ret; } static struct dmx_frontend *get_fe(struct dmx_demux *demux, int type) { struct list_head *head, *pos; head = demux->get_frontends(demux); if (!head) return NULL; list_for_each(pos, head) if (DMX_FE_ENTRY(pos)->source == type) return DMX_FE_ENTRY(pos); return NULL; } static int dvb_dvr_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; struct dmx_frontend *front; bool need_ringbuffer = false; dprintk("%s\n", __func__); if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; if (dmxdev->exit) { mutex_unlock(&dmxdev->mutex); return -ENODEV; } dmxdev->may_do_mmap = 0; /* * The logic here is a little tricky due to the ifdef. * * The ringbuffer is used for both read and mmap. * * It is not needed, however, on two situations: * - Write devices (access with O_WRONLY); * - For duplex device nodes, opened with O_RDWR. 
*/ if ((file->f_flags & O_ACCMODE) == O_RDONLY) need_ringbuffer = true; else if ((file->f_flags & O_ACCMODE) == O_RDWR) { if (!(dmxdev->capabilities & DMXDEV_CAP_DUPLEX)) { #ifdef CONFIG_DVB_MMAP dmxdev->may_do_mmap = 1; need_ringbuffer = true; #else mutex_unlock(&dmxdev->mutex); return -EOPNOTSUPP; #endif } } if (need_ringbuffer) { void *mem; if (!dvbdev->readers) { mutex_unlock(&dmxdev->mutex); return -EBUSY; } mem = vmalloc(DVR_BUFFER_SIZE); if (!mem) { mutex_unlock(&dmxdev->mutex); return -ENOMEM; } dvb_ringbuffer_init(&dmxdev->dvr_buffer, mem, DVR_BUFFER_SIZE); if (dmxdev->may_do_mmap) dvb_vb2_init(&dmxdev->dvr_vb2_ctx, "dvr", file->f_flags & O_NONBLOCK); dvbdev->readers--; } if ((file->f_flags & O_ACCMODE) == O_WRONLY) { dmxdev->dvr_orig_fe = dmxdev->demux->frontend; if (!dmxdev->demux->write) { mutex_unlock(&dmxdev->mutex); return -EOPNOTSUPP; } front = get_fe(dmxdev->demux, DMX_MEMORY_FE); if (!front) { mutex_unlock(&dmxdev->mutex); return -EINVAL; } dmxdev->demux->disconnect_frontend(dmxdev->demux); dmxdev->demux->connect_frontend(dmxdev->demux, front); } dvbdev->users++; mutex_unlock(&dmxdev->mutex); return 0; } static int dvb_dvr_release(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; mutex_lock(&dmxdev->mutex); if ((file->f_flags & O_ACCMODE) == O_WRONLY) { dmxdev->demux->disconnect_frontend(dmxdev->demux); dmxdev->demux->connect_frontend(dmxdev->demux, dmxdev->dvr_orig_fe); } if (((file->f_flags & O_ACCMODE) == O_RDONLY) || dmxdev->may_do_mmap) { if (dmxdev->may_do_mmap) { if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) dvb_vb2_stream_off(&dmxdev->dvr_vb2_ctx); dvb_vb2_release(&dmxdev->dvr_vb2_ctx); } dvbdev->readers++; if (dmxdev->dvr_buffer.data) { void *mem = dmxdev->dvr_buffer.data; /*memory barrier*/ mb(); spin_lock_irq(&dmxdev->lock); dmxdev->dvr_buffer.data = NULL; spin_unlock_irq(&dmxdev->lock); vfree(mem); } } /* TODO */ dvbdev->users--; if (dvbdev->users == 1 && dmxdev->exit == 1) { mutex_unlock(&dmxdev->mutex); wake_up(&dvbdev->wait_queue); } else mutex_unlock(&dmxdev->mutex); return 0; } static ssize_t dvb_dvr_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; int ret; if (!dmxdev->demux->write) return -EOPNOTSUPP; if ((file->f_flags & O_ACCMODE) != O_WRONLY) return -EINVAL; if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; if (dmxdev->exit) { mutex_unlock(&dmxdev->mutex); return -ENODEV; } ret = dmxdev->demux->write(dmxdev->demux, buf, count); mutex_unlock(&dmxdev->mutex); return ret; } static ssize_t dvb_dvr_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; if (dmxdev->exit) return -ENODEV; return dvb_dmxdev_buffer_read(&dmxdev->dvr_buffer, file->f_flags & O_NONBLOCK, buf, count, ppos); } static int dvb_dvr_set_buffer_size(struct dmxdev *dmxdev, unsigned long size) { struct dvb_ringbuffer *buf = &dmxdev->dvr_buffer; void *newmem; void *oldmem; dprintk("%s\n", __func__); if (buf->size == size) return 0; if (!size) return -EINVAL; newmem = vmalloc(size); if (!newmem) return -ENOMEM; oldmem = buf->data; spin_lock_irq(&dmxdev->lock); buf->data = newmem; buf->size = size; /* reset and not flush in case the buffer shrinks */ dvb_ringbuffer_reset(buf); spin_unlock_irq(&dmxdev->lock); vfree(oldmem); return 0; } static inline void 
dvb_dmxdev_filter_state_set(struct dmxdev_filter *dmxdevfilter, int state) { spin_lock_irq(&dmxdevfilter->dev->lock); dmxdevfilter->state = state; spin_unlock_irq(&dmxdevfilter->dev->lock); } static int dvb_dmxdev_set_buffer_size(struct dmxdev_filter *dmxdevfilter, unsigned long size) { struct dvb_ringbuffer *buf = &dmxdevfilter->buffer; void *newmem; void *oldmem; if (buf->size == size) return 0; if (!size) return -EINVAL; if (dmxdevfilter->state >= DMXDEV_STATE_GO) return -EBUSY; newmem = vmalloc(size); if (!newmem) return -ENOMEM; oldmem = buf->data; spin_lock_irq(&dmxdevfilter->dev->lock); buf->data = newmem; buf->size = size; /* reset and not flush in case the buffer shrinks */ dvb_ringbuffer_reset(buf); spin_unlock_irq(&dmxdevfilter->dev->lock); vfree(oldmem); return 0; } static void dvb_dmxdev_filter_timeout(struct timer_list *t) { struct dmxdev_filter *dmxdevfilter = from_timer(dmxdevfilter, t, timer); dmxdevfilter->buffer.error = -ETIMEDOUT; spin_lock_irq(&dmxdevfilter->dev->lock); dmxdevfilter->state = DMXDEV_STATE_TIMEDOUT; spin_unlock_irq(&dmxdevfilter->dev->lock); wake_up(&dmxdevfilter->buffer.queue); } static void dvb_dmxdev_filter_timer(struct dmxdev_filter *dmxdevfilter) { struct dmx_sct_filter_params *para = &dmxdevfilter->params.sec; del_timer(&dmxdevfilter->timer); if (para->timeout) { dmxdevfilter->timer.expires = jiffies + 1 + (HZ / 2 + HZ * para->timeout) / 1000; add_timer(&dmxdevfilter->timer); } } static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len, const u8 *buffer2, size_t buffer2_len, struct dmx_section_filter *filter, u32 *buffer_flags) { struct dmxdev_filter *dmxdevfilter = filter->priv; int ret; if (!dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx) && dmxdevfilter->buffer.error) { wake_up(&dmxdevfilter->buffer.queue); return 0; } spin_lock(&dmxdevfilter->dev->lock); if (dmxdevfilter->state != DMXDEV_STATE_GO) { spin_unlock(&dmxdevfilter->dev->lock); return 0; } del_timer(&dmxdevfilter->timer); dprintk("section callback %*ph\n", 6, buffer1); if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) { ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, buffer1, buffer1_len, buffer_flags); if (ret == buffer1_len) ret = dvb_vb2_fill_buffer(&dmxdevfilter->vb2_ctx, buffer2, buffer2_len, buffer_flags); } else { ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer1, buffer1_len); if (ret == buffer1_len) { ret = dvb_dmxdev_buffer_write(&dmxdevfilter->buffer, buffer2, buffer2_len); } } if (ret < 0) dmxdevfilter->buffer.error = ret; if (dmxdevfilter->params.sec.flags & DMX_ONESHOT) dmxdevfilter->state = DMXDEV_STATE_DONE; spin_unlock(&dmxdevfilter->dev->lock); wake_up(&dmxdevfilter->buffer.queue); return 0; } static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len, const u8 *buffer2, size_t buffer2_len, struct dmx_ts_feed *feed, u32 *buffer_flags) { struct dmxdev_filter *dmxdevfilter = feed->priv; struct dvb_ringbuffer *buffer; #ifdef CONFIG_DVB_MMAP struct dvb_vb2_ctx *ctx; #endif int ret; spin_lock(&dmxdevfilter->dev->lock); if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) { spin_unlock(&dmxdevfilter->dev->lock); return 0; } if (dmxdevfilter->params.pes.output == DMX_OUT_TAP || dmxdevfilter->params.pes.output == DMX_OUT_TSDEMUX_TAP) { buffer = &dmxdevfilter->buffer; #ifdef CONFIG_DVB_MMAP ctx = &dmxdevfilter->vb2_ctx; #endif } else { buffer = &dmxdevfilter->dev->dvr_buffer; #ifdef CONFIG_DVB_MMAP ctx = &dmxdevfilter->dev->dvr_vb2_ctx; #endif } if (dvb_vb2_is_streaming(ctx)) { ret = dvb_vb2_fill_buffer(ctx, buffer1, buffer1_len, 
buffer_flags); if (ret == buffer1_len) ret = dvb_vb2_fill_buffer(ctx, buffer2, buffer2_len, buffer_flags); } else { if (buffer->error) { spin_unlock(&dmxdevfilter->dev->lock); wake_up(&buffer->queue); return 0; } ret = dvb_dmxdev_buffer_write(buffer, buffer1, buffer1_len); if (ret == buffer1_len) ret = dvb_dmxdev_buffer_write(buffer, buffer2, buffer2_len); } if (ret < 0) buffer->error = ret; spin_unlock(&dmxdevfilter->dev->lock); wake_up(&buffer->queue); return 0; } /* stop feed but only mark the specified filter as stopped (state set) */ static int dvb_dmxdev_feed_stop(struct dmxdev_filter *dmxdevfilter) { struct dmxdev_feed *feed; dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); switch (dmxdevfilter->type) { case DMXDEV_TYPE_SEC: del_timer(&dmxdevfilter->timer); dmxdevfilter->feed.sec->stop_filtering(dmxdevfilter->feed.sec); break; case DMXDEV_TYPE_PES: list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) feed->ts->stop_filtering(feed->ts); break; default: return -EINVAL; } return 0; } /* start feed associated with the specified filter */ static int dvb_dmxdev_feed_start(struct dmxdev_filter *filter) { struct dmxdev_feed *feed; int ret; dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO); switch (filter->type) { case DMXDEV_TYPE_SEC: return filter->feed.sec->start_filtering(filter->feed.sec); case DMXDEV_TYPE_PES: list_for_each_entry(feed, &filter->feed.ts, next) { ret = feed->ts->start_filtering(feed->ts); if (ret < 0) { dvb_dmxdev_feed_stop(filter); return ret; } } break; default: return -EINVAL; } return 0; } /* restart section feed if it has filters left associated with it, otherwise release the feed */ static int dvb_dmxdev_feed_restart(struct dmxdev_filter *filter) { int i; struct dmxdev *dmxdev = filter->dev; u16 pid = filter->params.sec.pid; for (i = 0; i < dmxdev->filternum; i++) if (dmxdev->filter[i].state >= DMXDEV_STATE_GO && dmxdev->filter[i].type == DMXDEV_TYPE_SEC && dmxdev->filter[i].params.sec.pid == pid) { dvb_dmxdev_feed_start(&dmxdev->filter[i]); return 0; } filter->dev->demux->release_section_feed(dmxdev->demux, filter->feed.sec); return 0; } static int dvb_dmxdev_filter_stop(struct dmxdev_filter *dmxdevfilter) { struct dmxdev_feed *feed; struct dmx_demux *demux; if (dmxdevfilter->state < DMXDEV_STATE_GO) return 0; switch (dmxdevfilter->type) { case DMXDEV_TYPE_SEC: if (!dmxdevfilter->feed.sec) break; dvb_dmxdev_feed_stop(dmxdevfilter); if (dmxdevfilter->filter.sec) dmxdevfilter->feed.sec-> release_filter(dmxdevfilter->feed.sec, dmxdevfilter->filter.sec); dvb_dmxdev_feed_restart(dmxdevfilter); dmxdevfilter->feed.sec = NULL; break; case DMXDEV_TYPE_PES: dvb_dmxdev_feed_stop(dmxdevfilter); demux = dmxdevfilter->dev->demux; list_for_each_entry(feed, &dmxdevfilter->feed.ts, next) { demux->release_ts_feed(demux, feed->ts); feed->ts = NULL; } break; default: if (dmxdevfilter->state == DMXDEV_STATE_ALLOCATED) return 0; return -EINVAL; } dvb_ringbuffer_flush(&dmxdevfilter->buffer); return 0; } static void dvb_dmxdev_delete_pids(struct dmxdev_filter *dmxdevfilter) { struct dmxdev_feed *feed, *tmp; /* delete all PIDs */ list_for_each_entry_safe(feed, tmp, &dmxdevfilter->feed.ts, next) { list_del(&feed->next); kfree(feed); } BUG_ON(!list_empty(&dmxdevfilter->feed.ts)); } static inline int dvb_dmxdev_filter_reset(struct dmxdev_filter *dmxdevfilter) { if (dmxdevfilter->state < DMXDEV_STATE_SET) return 0; if (dmxdevfilter->type == DMXDEV_TYPE_PES) dvb_dmxdev_delete_pids(dmxdevfilter); dmxdevfilter->type = DMXDEV_TYPE_NONE; dvb_dmxdev_filter_state_set(dmxdevfilter, 
DMXDEV_STATE_ALLOCATED); return 0; } static int dvb_dmxdev_start_feed(struct dmxdev *dmxdev, struct dmxdev_filter *filter, struct dmxdev_feed *feed) { ktime_t timeout = ktime_set(0, 0); struct dmx_pes_filter_params *para = &filter->params.pes; enum dmx_output otype; int ret; int ts_type; enum dmx_ts_pes ts_pes; struct dmx_ts_feed *tsfeed; feed->ts = NULL; otype = para->output; ts_pes = para->pes_type; if (ts_pes < DMX_PES_OTHER) ts_type = TS_DECODER; else ts_type = 0; if (otype == DMX_OUT_TS_TAP) ts_type |= TS_PACKET; else if (otype == DMX_OUT_TSDEMUX_TAP) ts_type |= TS_PACKET | TS_DEMUX; else if (otype == DMX_OUT_TAP) ts_type |= TS_PACKET | TS_DEMUX | TS_PAYLOAD_ONLY; ret = dmxdev->demux->allocate_ts_feed(dmxdev->demux, &feed->ts, dvb_dmxdev_ts_callback); if (ret < 0) return ret; tsfeed = feed->ts; tsfeed->priv = filter; ret = tsfeed->set(tsfeed, feed->pid, ts_type, ts_pes, timeout); if (ret < 0) { dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed); return ret; } ret = tsfeed->start_filtering(tsfeed); if (ret < 0) { dmxdev->demux->release_ts_feed(dmxdev->demux, tsfeed); return ret; } return 0; } static int dvb_dmxdev_filter_start(struct dmxdev_filter *filter) { struct dmxdev *dmxdev = filter->dev; struct dmxdev_feed *feed; void *mem; int ret, i; if (filter->state < DMXDEV_STATE_SET) return -EINVAL; if (filter->state >= DMXDEV_STATE_GO) dvb_dmxdev_filter_stop(filter); if (!filter->buffer.data) { mem = vmalloc(filter->buffer.size); if (!mem) return -ENOMEM; spin_lock_irq(&filter->dev->lock); filter->buffer.data = mem; spin_unlock_irq(&filter->dev->lock); } dvb_ringbuffer_flush(&filter->buffer); switch (filter->type) { case DMXDEV_TYPE_SEC: { struct dmx_sct_filter_params *para = &filter->params.sec; struct dmx_section_filter **secfilter = &filter->filter.sec; struct dmx_section_feed **secfeed = &filter->feed.sec; *secfilter = NULL; *secfeed = NULL; /* find active filter/feed with same PID */ for (i = 0; i < dmxdev->filternum; i++) { if (dmxdev->filter[i].state >= DMXDEV_STATE_GO && dmxdev->filter[i].type == DMXDEV_TYPE_SEC && dmxdev->filter[i].params.sec.pid == para->pid) { *secfeed = dmxdev->filter[i].feed.sec; break; } } /* if no feed found, try to allocate new one */ if (!*secfeed) { ret = dmxdev->demux->allocate_section_feed(dmxdev->demux, secfeed, dvb_dmxdev_section_callback); if (!*secfeed) { pr_err("DVB (%s): could not alloc feed\n", __func__); return ret; } ret = (*secfeed)->set(*secfeed, para->pid, (para->flags & DMX_CHECK_CRC) ? 
1 : 0); if (ret < 0) { pr_err("DVB (%s): could not set feed\n", __func__); dvb_dmxdev_feed_restart(filter); return ret; } } else { dvb_dmxdev_feed_stop(filter); } ret = (*secfeed)->allocate_filter(*secfeed, secfilter); if (ret < 0) { dvb_dmxdev_feed_restart(filter); filter->feed.sec->start_filtering(*secfeed); dprintk("could not get filter\n"); return ret; } (*secfilter)->priv = filter; memcpy(&((*secfilter)->filter_value[3]), &(para->filter.filter[1]), DMX_FILTER_SIZE - 1); memcpy(&(*secfilter)->filter_mask[3], &para->filter.mask[1], DMX_FILTER_SIZE - 1); memcpy(&(*secfilter)->filter_mode[3], &para->filter.mode[1], DMX_FILTER_SIZE - 1); (*secfilter)->filter_value[0] = para->filter.filter[0]; (*secfilter)->filter_mask[0] = para->filter.mask[0]; (*secfilter)->filter_mode[0] = para->filter.mode[0]; (*secfilter)->filter_mask[1] = 0; (*secfilter)->filter_mask[2] = 0; filter->todo = 0; ret = filter->feed.sec->start_filtering(filter->feed.sec); if (ret < 0) return ret; dvb_dmxdev_filter_timer(filter); break; } case DMXDEV_TYPE_PES: list_for_each_entry(feed, &filter->feed.ts, next) { ret = dvb_dmxdev_start_feed(dmxdev, filter, feed); if (ret < 0) { dvb_dmxdev_filter_stop(filter); return ret; } } break; default: return -EINVAL; } dvb_dmxdev_filter_state_set(filter, DMXDEV_STATE_GO); return 0; } static int dvb_demux_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; int i; struct dmxdev_filter *dmxdevfilter; if (!dmxdev->filter) return -EINVAL; if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; if (dmxdev->exit) { mutex_unlock(&dmxdev->mutex); return -ENODEV; } for (i = 0; i < dmxdev->filternum; i++) if (dmxdev->filter[i].state == DMXDEV_STATE_FREE) break; if (i == dmxdev->filternum) { mutex_unlock(&dmxdev->mutex); return -EMFILE; } dmxdevfilter = &dmxdev->filter[i]; mutex_init(&dmxdevfilter->mutex); file->private_data = dmxdevfilter; #ifdef CONFIG_DVB_MMAP dmxdev->may_do_mmap = 1; #else dmxdev->may_do_mmap = 0; #endif dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192); dvb_vb2_init(&dmxdevfilter->vb2_ctx, "demux_filter", file->f_flags & O_NONBLOCK); dmxdevfilter->type = DMXDEV_TYPE_NONE; dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED); timer_setup(&dmxdevfilter->timer, dvb_dmxdev_filter_timeout, 0); dvbdev->users++; mutex_unlock(&dmxdev->mutex); return 0; } static int dvb_dmxdev_filter_free(struct dmxdev *dmxdev, struct dmxdev_filter *dmxdevfilter) { mutex_lock(&dmxdev->mutex); mutex_lock(&dmxdevfilter->mutex); if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) dvb_vb2_stream_off(&dmxdevfilter->vb2_ctx); dvb_vb2_release(&dmxdevfilter->vb2_ctx); dvb_dmxdev_filter_stop(dmxdevfilter); dvb_dmxdev_filter_reset(dmxdevfilter); if (dmxdevfilter->buffer.data) { void *mem = dmxdevfilter->buffer.data; spin_lock_irq(&dmxdev->lock); dmxdevfilter->buffer.data = NULL; spin_unlock_irq(&dmxdev->lock); vfree(mem); } dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_FREE); wake_up(&dmxdevfilter->buffer.queue); mutex_unlock(&dmxdevfilter->mutex); mutex_unlock(&dmxdev->mutex); return 0; } static inline void invert_mode(struct dmx_filter *filter) { int i; for (i = 0; i < DMX_FILTER_SIZE; i++) filter->mode[i] ^= 0xff; } static int dvb_dmxdev_add_pid(struct dmxdev *dmxdev, struct dmxdev_filter *filter, u16 pid) { struct dmxdev_feed *feed; if ((filter->type != DMXDEV_TYPE_PES) || (filter->state < DMXDEV_STATE_SET)) return -EINVAL; /* only TS packet filters may have multiple PIDs */ if 
((filter->params.pes.output != DMX_OUT_TSDEMUX_TAP) && (!list_empty(&filter->feed.ts))) return -EINVAL; feed = kzalloc(sizeof(struct dmxdev_feed), GFP_KERNEL); if (feed == NULL) return -ENOMEM; feed->pid = pid; list_add(&feed->next, &filter->feed.ts); if (filter->state >= DMXDEV_STATE_GO) return dvb_dmxdev_start_feed(dmxdev, filter, feed); return 0; } static int dvb_dmxdev_remove_pid(struct dmxdev *dmxdev, struct dmxdev_filter *filter, u16 pid) { struct dmxdev_feed *feed, *tmp; if ((filter->type != DMXDEV_TYPE_PES) || (filter->state < DMXDEV_STATE_SET)) return -EINVAL; list_for_each_entry_safe(feed, tmp, &filter->feed.ts, next) { if ((feed->pid == pid) && (feed->ts != NULL)) { feed->ts->stop_filtering(feed->ts); filter->dev->demux->release_ts_feed(filter->dev->demux, feed->ts); list_del(&feed->next); kfree(feed); } } return 0; } static int dvb_dmxdev_filter_set(struct dmxdev *dmxdev, struct dmxdev_filter *dmxdevfilter, struct dmx_sct_filter_params *params) { dprintk("%s: PID=0x%04x, flags=%02x, timeout=%d\n", __func__, params->pid, params->flags, params->timeout); dvb_dmxdev_filter_stop(dmxdevfilter); dmxdevfilter->type = DMXDEV_TYPE_SEC; memcpy(&dmxdevfilter->params.sec, params, sizeof(struct dmx_sct_filter_params)); invert_mode(&dmxdevfilter->params.sec.filter); dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); if (params->flags & DMX_IMMEDIATE_START) return dvb_dmxdev_filter_start(dmxdevfilter); return 0; } static int dvb_dmxdev_pes_filter_set(struct dmxdev *dmxdev, struct dmxdev_filter *dmxdevfilter, struct dmx_pes_filter_params *params) { int ret; dvb_dmxdev_filter_stop(dmxdevfilter); dvb_dmxdev_filter_reset(dmxdevfilter); if ((unsigned int)params->pes_type > DMX_PES_OTHER) return -EINVAL; dmxdevfilter->type = DMXDEV_TYPE_PES; memcpy(&dmxdevfilter->params, params, sizeof(struct dmx_pes_filter_params)); INIT_LIST_HEAD(&dmxdevfilter->feed.ts); dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET); ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, dmxdevfilter->params.pes.pid); if (ret < 0) return ret; if (params->flags & DMX_IMMEDIATE_START) return dvb_dmxdev_filter_start(dmxdevfilter); return 0; } static ssize_t dvb_dmxdev_read_sec(struct dmxdev_filter *dfil, struct file *file, char __user *buf, size_t count, loff_t *ppos) { int result, hcount; int done = 0; if (dfil->todo <= 0) { hcount = 3 + dfil->todo; if (hcount > count) hcount = count; result = dvb_dmxdev_buffer_read(&dfil->buffer, file->f_flags & O_NONBLOCK, buf, hcount, ppos); if (result < 0) { dfil->todo = 0; return result; } if (copy_from_user(dfil->secheader - dfil->todo, buf, result)) return -EFAULT; buf += result; done = result; count -= result; dfil->todo -= result; if (dfil->todo > -3) return done; dfil->todo = ((dfil->secheader[1] << 8) | dfil->secheader[2]) & 0xfff; if (!count) return done; } if (count > dfil->todo) count = dfil->todo; result = dvb_dmxdev_buffer_read(&dfil->buffer, file->f_flags & O_NONBLOCK, buf, count, ppos); if (result < 0) return result; dfil->todo -= result; return (result + done); } static ssize_t dvb_demux_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct dmxdev_filter *dmxdevfilter = file->private_data; int ret; if (mutex_lock_interruptible(&dmxdevfilter->mutex)) return -ERESTARTSYS; if (dmxdevfilter->type == DMXDEV_TYPE_SEC) ret = dvb_dmxdev_read_sec(dmxdevfilter, file, buf, count, ppos); else ret = dvb_dmxdev_buffer_read(&dmxdevfilter->buffer, file->f_flags & O_NONBLOCK, buf, count, ppos); mutex_unlock(&dmxdevfilter->mutex); return ret; } static int 
dvb_demux_do_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dmxdev_filter *dmxdevfilter = file->private_data; struct dmxdev *dmxdev = dmxdevfilter->dev; unsigned long arg = (unsigned long)parg; int ret = 0; if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; switch (cmd) { case DMX_START: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } if (dmxdevfilter->state < DMXDEV_STATE_SET) ret = -EINVAL; else ret = dvb_dmxdev_filter_start(dmxdevfilter); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_STOP: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_dmxdev_filter_stop(dmxdevfilter); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_SET_FILTER: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_dmxdev_filter_set(dmxdev, dmxdevfilter, parg); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_SET_PES_FILTER: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_dmxdev_pes_filter_set(dmxdev, dmxdevfilter, parg); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_SET_BUFFER_SIZE: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_dmxdev_set_buffer_size(dmxdevfilter, arg); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_GET_PES_PIDS: if (!dmxdev->demux->get_pes_pids) { ret = -EINVAL; break; } dmxdev->demux->get_pes_pids(dmxdev->demux, parg); break; case DMX_GET_STC: if (!dmxdev->demux->get_stc) { ret = -EINVAL; break; } ret = dmxdev->demux->get_stc(dmxdev->demux, ((struct dmx_stc *)parg)->num, &((struct dmx_stc *)parg)->stc, &((struct dmx_stc *)parg)->base); break; case DMX_ADD_PID: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { ret = -ERESTARTSYS; break; } ret = dvb_dmxdev_add_pid(dmxdev, dmxdevfilter, *(u16 *)parg); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_REMOVE_PID: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { ret = -ERESTARTSYS; break; } ret = dvb_dmxdev_remove_pid(dmxdev, dmxdevfilter, *(u16 *)parg); mutex_unlock(&dmxdevfilter->mutex); break; #ifdef CONFIG_DVB_MMAP case DMX_REQBUFS: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_vb2_reqbufs(&dmxdevfilter->vb2_ctx, parg); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_QUERYBUF: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_vb2_querybuf(&dmxdevfilter->vb2_ctx, parg); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_EXPBUF: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_vb2_expbuf(&dmxdevfilter->vb2_ctx, parg); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_QBUF: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_vb2_qbuf(&dmxdevfilter->vb2_ctx, parg); if (ret == 0 && !dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) ret = dvb_vb2_stream_on(&dmxdevfilter->vb2_ctx); mutex_unlock(&dmxdevfilter->mutex); break; case DMX_DQBUF: if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_vb2_dqbuf(&dmxdevfilter->vb2_ctx, parg); mutex_unlock(&dmxdevfilter->mutex); break; #endif default: ret = -ENOTTY; break; } 
mutex_unlock(&dmxdev->mutex); return ret; } static long dvb_demux_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl); } static __poll_t dvb_demux_poll(struct file *file, poll_table *wait) { struct dmxdev_filter *dmxdevfilter = file->private_data; __poll_t mask = 0; poll_wait(file, &dmxdevfilter->buffer.queue, wait); if ((!dmxdevfilter) || dmxdevfilter->dev->exit) return EPOLLERR; if (dvb_vb2_is_streaming(&dmxdevfilter->vb2_ctx)) return dvb_vb2_poll(&dmxdevfilter->vb2_ctx, file, wait); if (dmxdevfilter->state != DMXDEV_STATE_GO && dmxdevfilter->state != DMXDEV_STATE_DONE && dmxdevfilter->state != DMXDEV_STATE_TIMEDOUT) return 0; if (dmxdevfilter->buffer.error) mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); if (!dvb_ringbuffer_empty(&dmxdevfilter->buffer)) mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI); return mask; } #ifdef CONFIG_DVB_MMAP static int dvb_demux_mmap(struct file *file, struct vm_area_struct *vma) { struct dmxdev_filter *dmxdevfilter = file->private_data; struct dmxdev *dmxdev = dmxdevfilter->dev; int ret; if (!dmxdev->may_do_mmap) return -ENOTTY; if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; if (mutex_lock_interruptible(&dmxdevfilter->mutex)) { mutex_unlock(&dmxdev->mutex); return -ERESTARTSYS; } ret = dvb_vb2_mmap(&dmxdevfilter->vb2_ctx, vma); mutex_unlock(&dmxdevfilter->mutex); mutex_unlock(&dmxdev->mutex); return ret; } #endif static int dvb_demux_release(struct inode *inode, struct file *file) { struct dmxdev_filter *dmxdevfilter = file->private_data; struct dmxdev *dmxdev = dmxdevfilter->dev; int ret; ret = dvb_dmxdev_filter_free(dmxdev, dmxdevfilter); mutex_lock(&dmxdev->mutex); dmxdev->dvbdev->users--; if (dmxdev->dvbdev->users == 1 && dmxdev->exit == 1) { mutex_unlock(&dmxdev->mutex); wake_up(&dmxdev->dvbdev->wait_queue); } else mutex_unlock(&dmxdev->mutex); return ret; } static const struct file_operations dvb_demux_fops = { .owner = THIS_MODULE, .read = dvb_demux_read, .unlocked_ioctl = dvb_demux_ioctl, .compat_ioctl = dvb_demux_ioctl, .open = dvb_demux_open, .release = dvb_demux_release, .poll = dvb_demux_poll, .llseek = default_llseek, #ifdef CONFIG_DVB_MMAP .mmap = dvb_demux_mmap, #endif }; static const struct dvb_device dvbdev_demux = { .priv = NULL, .users = 1, .writers = 1, #if defined(CONFIG_MEDIA_CONTROLLER_DVB) .name = "dvb-demux", #endif .fops = &dvb_demux_fops }; static int dvb_dvr_do_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; unsigned long arg = (unsigned long)parg; int ret; if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; switch (cmd) { case DMX_SET_BUFFER_SIZE: ret = dvb_dvr_set_buffer_size(dmxdev, arg); break; #ifdef CONFIG_DVB_MMAP case DMX_REQBUFS: ret = dvb_vb2_reqbufs(&dmxdev->dvr_vb2_ctx, parg); break; case DMX_QUERYBUF: ret = dvb_vb2_querybuf(&dmxdev->dvr_vb2_ctx, parg); break; case DMX_EXPBUF: ret = dvb_vb2_expbuf(&dmxdev->dvr_vb2_ctx, parg); break; case DMX_QBUF: ret = dvb_vb2_qbuf(&dmxdev->dvr_vb2_ctx, parg); if (ret == 0 && !dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) ret = dvb_vb2_stream_on(&dmxdev->dvr_vb2_ctx); break; case DMX_DQBUF: ret = dvb_vb2_dqbuf(&dmxdev->dvr_vb2_ctx, parg); break; #endif default: ret = -ENOTTY; break; } mutex_unlock(&dmxdev->mutex); return ret; } static long dvb_dvr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl); } static __poll_t 
dvb_dvr_poll(struct file *file, poll_table *wait) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; __poll_t mask = 0; dprintk("%s\n", __func__); poll_wait(file, &dmxdev->dvr_buffer.queue, wait); if (dmxdev->exit) return EPOLLERR; if (dvb_vb2_is_streaming(&dmxdev->dvr_vb2_ctx)) return dvb_vb2_poll(&dmxdev->dvr_vb2_ctx, file, wait); if (((file->f_flags & O_ACCMODE) == O_RDONLY) || dmxdev->may_do_mmap) { if (dmxdev->dvr_buffer.error) mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI | EPOLLERR); if (!dvb_ringbuffer_empty(&dmxdev->dvr_buffer)) mask |= (EPOLLIN | EPOLLRDNORM | EPOLLPRI); } else mask |= (EPOLLOUT | EPOLLWRNORM | EPOLLPRI); return mask; } #ifdef CONFIG_DVB_MMAP static int dvb_dvr_mmap(struct file *file, struct vm_area_struct *vma) { struct dvb_device *dvbdev = file->private_data; struct dmxdev *dmxdev = dvbdev->priv; int ret; if (!dmxdev->may_do_mmap) return -ENOTTY; if (dmxdev->exit) return -ENODEV; if (mutex_lock_interruptible(&dmxdev->mutex)) return -ERESTARTSYS; ret = dvb_vb2_mmap(&dmxdev->dvr_vb2_ctx, vma); mutex_unlock(&dmxdev->mutex); return ret; } #endif static const struct file_operations dvb_dvr_fops = { .owner = THIS_MODULE, .read = dvb_dvr_read, .write = dvb_dvr_write, .unlocked_ioctl = dvb_dvr_ioctl, .open = dvb_dvr_open, .release = dvb_dvr_release, .poll = dvb_dvr_poll, .llseek = default_llseek, #ifdef CONFIG_DVB_MMAP .mmap = dvb_dvr_mmap, #endif }; static const struct dvb_device dvbdev_dvr = { .priv = NULL, .readers = 1, .users = 1, #if defined(CONFIG_MEDIA_CONTROLLER_DVB) .name = "dvb-dvr", #endif .fops = &dvb_dvr_fops }; int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter) { int i, ret; if (dmxdev->demux->open(dmxdev->demux) < 0) return -EUSERS; dmxdev->filter = vmalloc(array_size(sizeof(struct dmxdev_filter), dmxdev->filternum)); if (!dmxdev->filter) return -ENOMEM; mutex_init(&dmxdev->mutex); spin_lock_init(&dmxdev->lock); for (i = 0; i < dmxdev->filternum; i++) { dmxdev->filter[i].dev = dmxdev; dmxdev->filter[i].buffer.data = NULL; dvb_dmxdev_filter_state_set(&dmxdev->filter[i], DMXDEV_STATE_FREE); } ret = dvb_register_device(dvb_adapter, &dmxdev->dvbdev, &dvbdev_demux, dmxdev, DVB_DEVICE_DEMUX, dmxdev->filternum); if (ret < 0) goto err_register_dvbdev; ret = dvb_register_device(dvb_adapter, &dmxdev->dvr_dvbdev, &dvbdev_dvr, dmxdev, DVB_DEVICE_DVR, dmxdev->filternum); if (ret < 0) goto err_register_dvr_dvbdev; dvb_ringbuffer_init(&dmxdev->dvr_buffer, NULL, 8192); return 0; err_register_dvr_dvbdev: dvb_unregister_device(dmxdev->dvbdev); err_register_dvbdev: vfree(dmxdev->filter); dmxdev->filter = NULL; return ret; } EXPORT_SYMBOL(dvb_dmxdev_init); void dvb_dmxdev_release(struct dmxdev *dmxdev) { mutex_lock(&dmxdev->mutex); dmxdev->exit = 1; mutex_unlock(&dmxdev->mutex); if (dmxdev->dvbdev->users > 1) { wait_event(dmxdev->dvbdev->wait_queue, dmxdev->dvbdev->users == 1); } if (dmxdev->dvr_dvbdev->users > 1) { wait_event(dmxdev->dvr_dvbdev->wait_queue, dmxdev->dvr_dvbdev->users == 1); } dvb_unregister_device(dmxdev->dvbdev); dvb_unregister_device(dmxdev->dvr_dvbdev); vfree(dmxdev->filter); dmxdev->filter = NULL; dmxdev->demux->close(dmxdev->demux); } EXPORT_SYMBOL(dvb_dmxdev_release);
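The demux node registered above is driven from user space through the DMX_* ioctls handled in dvb_demux_do_ioctl(). Below is a minimal user-space sketch of that path, built only on the public <linux/dvb/dmx.h> API; the device path, PID and output choice are assumptions for illustration, not values taken from this driver.

/*
 * Minimal sketch: select one PID with DMX_SET_PES_FILTER and read the
 * resulting TS packets back from the same demux node
 * (DMX_OUT_TSDEMUX_TAP routes them to the filter's own ring buffer).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/dvb/dmx.h>

int main(void)
{
	struct dmx_pes_filter_params p = {
		.pid      = 0x100,               /* hypothetical PID */
		.input    = DMX_IN_FRONTEND,
		.output   = DMX_OUT_TSDEMUX_TAP, /* TS packets via read() on this fd */
		.pes_type = DMX_PES_OTHER,
		.flags    = DMX_IMMEDIATE_START, /* no separate DMX_START needed */
	};
	unsigned char buf[188 * 32];
	ssize_t n;
	int fd = open("/dev/dvb/adapter0/demux0", O_RDWR); /* assumed path */

	if (fd < 0 || ioctl(fd, DMX_SET_PES_FILTER, &p) < 0) {
		perror("demux");
		return 1;
	}
	n = read(fd, buf, sizeof(buf)); /* blocks until data, error or timeout */
	printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}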
21 2 15 6 417 6 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_DESC_H #define _ASM_X86_DESC_H #include <asm/desc_defs.h> #include <asm/ldt.h> #include <asm/mmu.h> #include <asm/fixmap.h> #include <asm/irq_vectors.h> #include <asm/cpu_entry_area.h> #include <linux/debug_locks.h> #include <linux/smp.h> #include <linux/percpu.h> static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info) { desc->limit0 = info->limit & 0x0ffff; desc->base0 = (info->base_addr & 0x0000ffff); desc->base1 = (info->base_addr & 0x00ff0000) >> 16; desc->type = (info->read_exec_only ^ 1) << 1; desc->type |= info->contents << 2; /* Set the ACCESS bit so it can be mapped RO */ desc->type |= 1; desc->s = 1; desc->dpl = 0x3; desc->p = info->seg_not_present ^ 1; desc->limit1 = (info->limit & 0xf0000) >> 16; desc->avl = info->useable; desc->d = info->seg_32bit; desc->g = info->limit_in_pages; desc->base2 = (info->base_addr & 0xff000000) >> 24; /* * Don't allow setting of the lm bit. It would confuse * user_64bit_mode and would get overridden by sysret anyway. 
*/ desc->l = 0; } struct gdt_page { struct desc_struct gdt[GDT_ENTRIES]; } __attribute__((aligned(PAGE_SIZE))); DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); DECLARE_INIT_PER_CPU(gdt_page); /* Provide the original GDT */ static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu) { return per_cpu(gdt_page, cpu).gdt; } /* Provide the current original GDT */ static inline struct desc_struct *get_current_gdt_rw(void) { return this_cpu_ptr(&gdt_page)->gdt; } /* Provide the fixmap address of the remapped GDT */ static inline struct desc_struct *get_cpu_gdt_ro(int cpu) { return (struct desc_struct *)&get_cpu_entry_area(cpu)->gdt; } /* Provide the current read-only GDT */ static inline struct desc_struct *get_current_gdt_ro(void) { return get_cpu_gdt_ro(smp_processor_id()); } /* Provide the physical address of the GDT page. */ static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu) { return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu)); } static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func, unsigned dpl, unsigned ist, unsigned seg) { gate->offset_low = (u16) func; gate->bits.p = 1; gate->bits.dpl = dpl; gate->bits.zero = 0; gate->bits.type = type; gate->offset_middle = (u16) (func >> 16); #ifdef CONFIG_X86_64 gate->segment = __KERNEL_CS; gate->bits.ist = ist; gate->reserved = 0; gate->offset_high = (u32) (func >> 32); #else gate->segment = seg; gate->bits.ist = 0; #endif } static inline int desc_empty(const void *ptr) { const u32 *desc = ptr; return !(desc[0] | desc[1]); } #ifdef CONFIG_PARAVIRT_XXL #include <asm/paravirt.h> #else #define load_TR_desc() native_load_tr_desc() #define load_gdt(dtr) native_load_gdt(dtr) #define load_idt(dtr) native_load_idt(dtr) #define load_tr(tr) asm volatile("ltr %0"::"m" (tr)) #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) #define store_gdt(dtr) native_store_gdt(dtr) #define store_tr(tr) (tr = native_store_tr()) #define load_TLS(t, cpu) native_load_tls(t, cpu) #define set_ldt native_set_ldt #define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc) #define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type) #define write_idt_entry(dt, entry, g) native_write_idt_entry(dt, entry, g) static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries) { } static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries) { } #endif /* CONFIG_PARAVIRT_XXL */ #define store_ldt(ldt) asm("sldt %0" : "=m"(ldt)) static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate) { memcpy(&idt[entry], gate, sizeof(*gate)); } static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) { memcpy(&ldt[entry], desc, 8); } static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) { unsigned int size; switch (type) { case DESC_TSS: size = sizeof(tss_desc); break; case DESC_LDT: size = sizeof(ldt_desc); break; default: size = sizeof(*gdt); break; } memcpy(&gdt[entry], desc, size); } static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size) { struct ldttss_desc *desc = d; memset(desc, 0, sizeof(*desc)); desc->limit0 = (u16) size; desc->base0 = (u16) addr; desc->base1 = (addr >> 16) & 0xFF; desc->type = type; desc->p = 1; desc->limit1 = (size >> 16) & 0xF; desc->base2 = (addr >> 24) & 0xFF; #ifdef CONFIG_X86_64 desc->base3 = (u32) (addr >> 32); #endif } static inline void __set_tss_desc(unsigned cpu, 
unsigned int entry, struct x86_hw_tss *addr) { struct desc_struct *d = get_cpu_gdt_rw(cpu); tss_desc tss; set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, __KERNEL_TSS_LIMIT); write_gdt_entry(d, entry, &tss, DESC_TSS); } #define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) static inline void native_set_ldt(const void *addr, unsigned int entries) { if (likely(entries == 0)) asm volatile("lldt %w0"::"q" (0)); else { unsigned cpu = smp_processor_id(); ldt_desc ldt; set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT, entries * LDT_ENTRY_SIZE - 1); write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT, &ldt, DESC_LDT); asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); } } static inline void native_load_gdt(const struct desc_ptr *dtr) { asm volatile("lgdt %0"::"m" (*dtr)); } static __always_inline void native_load_idt(const struct desc_ptr *dtr) { asm volatile("lidt %0"::"m" (*dtr)); } static inline void native_store_gdt(struct desc_ptr *dtr) { asm volatile("sgdt %0":"=m" (*dtr)); } static inline void store_idt(struct desc_ptr *dtr) { asm volatile("sidt %0":"=m" (*dtr)); } static inline void native_gdt_invalidate(void) { const struct desc_ptr invalid_gdt = { .address = 0, .size = 0 }; native_load_gdt(&invalid_gdt); } static inline void native_idt_invalidate(void) { const struct desc_ptr invalid_idt = { .address = 0, .size = 0 }; native_load_idt(&invalid_idt); } /* * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is * a read-only remapping. To prevent a page fault, the GDT is switched to the * original writeable version when needed. */ #ifdef CONFIG_X86_64 static inline void native_load_tr_desc(void) { struct desc_ptr gdt; int cpu = raw_smp_processor_id(); bool restore = 0; struct desc_struct *fixmap_gdt; native_store_gdt(&gdt); fixmap_gdt = get_cpu_gdt_ro(cpu); /* * If the current GDT is the read-only fixmap, swap to the original * writeable version. Swap back at the end. */ if (gdt.address == (unsigned long)fixmap_gdt) { load_direct_gdt(cpu); restore = 1; } asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); if (restore) load_fixmap_gdt(cpu); } #else static inline void native_load_tr_desc(void) { asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); } #endif static inline unsigned long native_store_tr(void) { unsigned long tr; asm volatile("str %0":"=r" (tr)); return tr; } static inline void native_load_tls(struct thread_struct *t, unsigned int cpu) { struct desc_struct *gdt = get_cpu_gdt_rw(cpu); unsigned int i; for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; } DECLARE_PER_CPU(bool, __tss_limit_invalid); static inline void force_reload_TR(void) { struct desc_struct *d = get_current_gdt_rw(); tss_desc tss; memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc)); /* * LTR requires an available TSS, and the TSS is currently * busy. Make it be available so that LTR will work. */ tss.type = DESC_TSS; write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS); load_TR_desc(); this_cpu_write(__tss_limit_invalid, false); } /* * Call this if you need the TSS limit to be correct, which should be the case * if and only if you have TIF_IO_BITMAP set or you're switching to a task * with TIF_IO_BITMAP set. */ static inline void refresh_tss_limit(void) { DEBUG_LOCKS_WARN_ON(preemptible()); if (unlikely(this_cpu_read(__tss_limit_invalid))) force_reload_TR(); } /* * If you do something evil that corrupts the cached TSS limit (I'm looking * at you, VMX exits), call this function. 
* * The optimization here is that the TSS limit only matters for Linux if the * IO bitmap is in use. If the TSS limit gets forced to its minimum value, * everything works except that IO bitmap will be ignored and all CPL 3 IO * instructions will #GP, which is exactly what we want for normal tasks. */ static inline void invalidate_tss_limit(void) { DEBUG_LOCKS_WARN_ON(preemptible()); if (unlikely(test_thread_flag(TIF_IO_BITMAP))) force_reload_TR(); else this_cpu_write(__tss_limit_invalid, true); } /* This intentionally ignores lm, since 32-bit apps don't have that field. */ #define LDT_empty(info) \ ((info)->base_addr == 0 && \ (info)->limit == 0 && \ (info)->contents == 0 && \ (info)->read_exec_only == 1 && \ (info)->seg_32bit == 0 && \ (info)->limit_in_pages == 0 && \ (info)->seg_not_present == 1 && \ (info)->useable == 0) /* Lots of programs expect an all-zero user_desc to mean "no segment at all". */ static inline bool LDT_zero(const struct user_desc *info) { return (info->base_addr == 0 && info->limit == 0 && info->contents == 0 && info->read_exec_only == 0 && info->seg_32bit == 0 && info->limit_in_pages == 0 && info->seg_not_present == 0 && info->useable == 0); } static inline void clear_LDT(void) { set_ldt(NULL, 0); } static inline unsigned long get_desc_base(const struct desc_struct *desc) { return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); } static inline void set_desc_base(struct desc_struct *desc, unsigned long base) { desc->base0 = base & 0xffff; desc->base1 = (base >> 16) & 0xff; desc->base2 = (base >> 24) & 0xff; } static inline unsigned long get_desc_limit(const struct desc_struct *desc) { return desc->limit0 | (desc->limit1 << 16); } static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) { desc->limit0 = limit & 0xffff; desc->limit1 = (limit >> 16) & 0xf; } static inline void init_idt_data(struct idt_data *data, unsigned int n, const void *addr) { BUG_ON(n > 0xFF); memset(data, 0, sizeof(*data)); data->vector = n; data->addr = addr; data->segment = __KERNEL_CS; data->bits.type = GATE_INTERRUPT; data->bits.p = 1; } static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d) { unsigned long addr = (unsigned long) d->addr; gate->offset_low = (u16) addr; gate->segment = (u16) d->segment; gate->bits = d->bits; gate->offset_middle = (u16) (addr >> 16); #ifdef CONFIG_X86_64 gate->offset_high = (u32) (addr >> 32); gate->reserved = 0; #endif } extern unsigned long system_vectors[]; extern void load_current_idt(void); extern void idt_setup_early_handler(void); extern void idt_setup_early_traps(void); extern void idt_setup_traps(void); extern void idt_setup_apic_and_irq_gates(void); extern bool idt_is_f00f_address(unsigned long address); #ifdef CONFIG_X86_64 extern void idt_setup_early_pf(void); #else static inline void idt_setup_early_pf(void) { } #endif extern void idt_invalidate(void); #endif /* _ASM_X86_DESC_H */
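set_desc_base()/get_desc_base() above show how a 32-bit base address is scattered across the base0/base1/base2 fields of a legacy segment descriptor. A standalone sketch of the same packing follows; the struct is a simplified stand-in for illustration, not the kernel's desc_struct.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the base fields of a segment descriptor. */
struct fake_desc {
	uint16_t base0;   /* bits  0..15 of the base */
	uint8_t  base1;   /* bits 16..23             */
	uint8_t  base2;   /* bits 24..31             */
};

static void set_base(struct fake_desc *d, uint32_t base)
{
	d->base0 = base & 0xffff;
	d->base1 = (base >> 16) & 0xff;
	d->base2 = (base >> 24) & 0xff;
}

static uint32_t get_base(const struct fake_desc *d)
{
	return d->base0 | ((uint32_t)d->base1 << 16) | ((uint32_t)d->base2 << 24);
}

int main(void)
{
	struct fake_desc d;

	set_base(&d, 0xdeadbeef);
	printf("0x%08x\n", get_base(&d));  /* prints 0xdeadbeef */
	return 0;
}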
28 28 28 1 1 1 1 25 25 25 2 23 4 5 13 4 1 17 18 18 22 22 21 1 21 5 21 21 6 21 17 17 2 1 17 17 17 25 9 16 16 16 15 4 4 4 4 4 46 46 46 46 23 1 5 9 16 4 9 4 4 8 5 19 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 // SPDX-License-Identifier: GPL-2.0-or-later /* * net/dccp/input.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> */ #include <linux/dccp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include "ackvec.h" #include "ccid.h" #include "dccp.h" /* rate-limit for syncs in reply to sequence-invalid packets; RFC 4340, 7.5.4 */ int sysctl_dccp_sync_ratelimit __read_mostly = HZ / 8; static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb) { __skb_pull(skb, 
dccp_hdr(skb)->dccph_doff * 4); __skb_queue_tail(&sk->sk_receive_queue, skb); skb_set_owner_r(skb, sk); sk->sk_data_ready(sk); } static void dccp_fin(struct sock *sk, struct sk_buff *skb) { /* * On receiving Close/CloseReq, both RD/WR shutdown are performed. * RFC 4340, 8.3 says that we MAY send further Data/DataAcks after * receiving the closing segment, but there is no guarantee that such * data will be processed at all. */ sk->sk_shutdown = SHUTDOWN_MASK; sock_set_flag(sk, SOCK_DONE); dccp_enqueue_skb(sk, skb); } static int dccp_rcv_close(struct sock *sk, struct sk_buff *skb) { int queued = 0; switch (sk->sk_state) { /* * We ignore Close when received in one of the following states: * - CLOSED (may be a late or duplicate packet) * - PASSIVE_CLOSEREQ (the peer has sent a CloseReq earlier) * - RESPOND (already handled by dccp_check_req) */ case DCCP_CLOSING: /* * Simultaneous-close: receiving a Close after sending one. This * can happen if both client and server perform active-close and * will result in an endless ping-pong of crossing and retrans- * mitted Close packets, which only terminates when one of the * nodes times out (min. 64 seconds). Quicker convergence can be * achieved when one of the nodes acts as tie-breaker. * This is ok as both ends are done with data transfer and each * end is just waiting for the other to acknowledge termination. */ if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) break; fallthrough; case DCCP_REQUESTING: case DCCP_ACTIVE_CLOSEREQ: dccp_send_reset(sk, DCCP_RESET_CODE_CLOSED); dccp_done(sk); break; case DCCP_OPEN: case DCCP_PARTOPEN: /* Give waiting application a chance to read pending data */ queued = 1; dccp_fin(sk, skb); dccp_set_state(sk, DCCP_PASSIVE_CLOSE); fallthrough; case DCCP_PASSIVE_CLOSE: /* * Retransmitted Close: we have already enqueued the first one. */ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); } return queued; } static int dccp_rcv_closereq(struct sock *sk, struct sk_buff *skb) { int queued = 0; /* * Step 7: Check for unexpected packet types * If (S.is_server and P.type == CloseReq) * Send Sync packet acknowledging P.seqno * Drop packet and return */ if (dccp_sk(sk)->dccps_role != DCCP_ROLE_CLIENT) { dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); return queued; } /* Step 13: process relevant Client states < CLOSEREQ */ switch (sk->sk_state) { case DCCP_REQUESTING: dccp_send_close(sk, 0); dccp_set_state(sk, DCCP_CLOSING); break; case DCCP_OPEN: case DCCP_PARTOPEN: /* Give waiting application a chance to read pending data */ queued = 1; dccp_fin(sk, skb); dccp_set_state(sk, DCCP_PASSIVE_CLOSEREQ); fallthrough; case DCCP_PASSIVE_CLOSEREQ: sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); } return queued; } static u16 dccp_reset_code_convert(const u8 code) { static const u16 error_code[] = { [DCCP_RESET_CODE_CLOSED] = 0, /* normal termination */ [DCCP_RESET_CODE_UNSPECIFIED] = 0, /* nothing known */ [DCCP_RESET_CODE_ABORTED] = ECONNRESET, [DCCP_RESET_CODE_NO_CONNECTION] = ECONNREFUSED, [DCCP_RESET_CODE_CONNECTION_REFUSED] = ECONNREFUSED, [DCCP_RESET_CODE_TOO_BUSY] = EUSERS, [DCCP_RESET_CODE_AGGRESSION_PENALTY] = EDQUOT, [DCCP_RESET_CODE_PACKET_ERROR] = ENOMSG, [DCCP_RESET_CODE_BAD_INIT_COOKIE] = EBADR, [DCCP_RESET_CODE_BAD_SERVICE_CODE] = EBADRQC, [DCCP_RESET_CODE_OPTION_ERROR] = EILSEQ, [DCCP_RESET_CODE_MANDATORY_ERROR] = EOPNOTSUPP, }; return code >= DCCP_MAX_RESET_CODES ? 
0 : error_code[code]; } static void dccp_rcv_reset(struct sock *sk, struct sk_buff *skb) { u16 err = dccp_reset_code_convert(dccp_hdr_reset(skb)->dccph_reset_code); sk->sk_err = err; /* Queue the equivalent of TCP fin so that dccp_recvmsg exits the loop */ dccp_fin(sk, skb); if (err && !sock_flag(sk, SOCK_DEAD)) sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); dccp_time_wait(sk, DCCP_TIME_WAIT, 0); } static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb) { struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec; if (av == NULL) return; if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ) dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq); dccp_ackvec_input(av, skb); } static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb) { const struct dccp_sock *dp = dccp_sk(sk); /* Don't deliver to RX CCID when node has shut down read end. */ if (!(sk->sk_shutdown & RCV_SHUTDOWN)) ccid_hc_rx_packet_recv(dp->dccps_hc_rx_ccid, sk, skb); /* * Until the TX queue has been drained, we can not honour SHUT_WR, since * we need received feedback as input to adjust congestion control. */ if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN)) ccid_hc_tx_packet_recv(dp->dccps_hc_tx_ccid, sk, skb); } static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); struct dccp_sock *dp = dccp_sk(sk); u64 lswl, lawl, seqno = DCCP_SKB_CB(skb)->dccpd_seq, ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq; /* * Step 5: Prepare sequence numbers for Sync * If P.type == Sync or P.type == SyncAck, * If S.AWL <= P.ackno <= S.AWH and P.seqno >= S.SWL, * / * P is valid, so update sequence number variables * accordingly. After this update, P will pass the tests * in Step 6. A SyncAck is generated if necessary in * Step 15 * / * Update S.GSR, S.SWL, S.SWH * Otherwise, * Drop packet and return */ if (dh->dccph_type == DCCP_PKT_SYNC || dh->dccph_type == DCCP_PKT_SYNCACK) { if (between48(ackno, dp->dccps_awl, dp->dccps_awh) && dccp_delta_seqno(dp->dccps_swl, seqno) >= 0) dccp_update_gsr(sk, seqno); else return -1; } /* * Step 6: Check sequence numbers * Let LSWL = S.SWL and LAWL = S.AWL * If P.type == CloseReq or P.type == Close or P.type == Reset, * LSWL := S.GSR + 1, LAWL := S.GAR * If LSWL <= P.seqno <= S.SWH * and (P.ackno does not exist or LAWL <= P.ackno <= S.AWH), * Update S.GSR, S.SWL, S.SWH * If P.type != Sync, * Update S.GAR */ lswl = dp->dccps_swl; lawl = dp->dccps_awl; if (dh->dccph_type == DCCP_PKT_CLOSEREQ || dh->dccph_type == DCCP_PKT_CLOSE || dh->dccph_type == DCCP_PKT_RESET) { lswl = ADD48(dp->dccps_gsr, 1); lawl = dp->dccps_gar; } if (between48(seqno, lswl, dp->dccps_swh) && (ackno == DCCP_PKT_WITHOUT_ACK_SEQ || between48(ackno, lawl, dp->dccps_awh))) { dccp_update_gsr(sk, seqno); if (dh->dccph_type != DCCP_PKT_SYNC && ackno != DCCP_PKT_WITHOUT_ACK_SEQ && after48(ackno, dp->dccps_gar)) dp->dccps_gar = ackno; } else { unsigned long now = jiffies; /* * Step 6: Check sequence numbers * Otherwise, * If P.type == Reset, * Send Sync packet acknowledging S.GSR * Otherwise, * Send Sync packet acknowledging P.seqno * Drop packet and return * * These Syncs are rate-limited as per RFC 4340, 7.5.4: * at most 1 / (dccp_sync_rate_limit * HZ) Syncs per second. 
*/ if (time_before(now, (dp->dccps_rate_last + sysctl_dccp_sync_ratelimit))) return -1; DCCP_WARN("Step 6 failed for %s packet, " "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and " "(P.ackno %s or LAWL(%llu) <= P.ackno(%llu) <= S.AWH(%llu), " "sending SYNC...\n", dccp_packet_name(dh->dccph_type), (unsigned long long) lswl, (unsigned long long) seqno, (unsigned long long) dp->dccps_swh, (ackno == DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists", (unsigned long long) lawl, (unsigned long long) ackno, (unsigned long long) dp->dccps_awh); dp->dccps_rate_last = now; if (dh->dccph_type == DCCP_PKT_RESET) seqno = dp->dccps_gsr; dccp_send_sync(sk, seqno, DCCP_PKT_SYNC); return -1; } return 0; } static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, const unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); switch (dccp_hdr(skb)->dccph_type) { case DCCP_PKT_DATAACK: case DCCP_PKT_DATA: /* * FIXME: schedule DATA_DROPPED (RFC 4340, 11.7.2) if and when * - sk_shutdown == RCV_SHUTDOWN, use Code 1, "Not Listening" * - sk_receive_queue is full, use Code 2, "Receive Buffer" */ dccp_enqueue_skb(sk, skb); return 0; case DCCP_PKT_ACK: goto discard; case DCCP_PKT_RESET: /* * Step 9: Process Reset * If P.type == Reset, * Tear down connection * S.state := TIMEWAIT * Set TIMEWAIT timer * Drop packet and return */ dccp_rcv_reset(sk, skb); return 0; case DCCP_PKT_CLOSEREQ: if (dccp_rcv_closereq(sk, skb)) return 0; goto discard; case DCCP_PKT_CLOSE: if (dccp_rcv_close(sk, skb)) return 0; goto discard; case DCCP_PKT_REQUEST: /* Step 7 * or (S.is_server and P.type == Response) * or (S.is_client and P.type == Request) * or (S.state >= OPEN and P.type == Request * and P.seqno >= S.OSR) * or (S.state >= OPEN and P.type == Response * and P.seqno >= S.OSR) * or (S.state == RESPOND and P.type == Data), * Send Sync packet acknowledging P.seqno * Drop packet and return */ if (dp->dccps_role != DCCP_ROLE_LISTEN) goto send_sync; goto check_seq; case DCCP_PKT_RESPONSE: if (dp->dccps_role != DCCP_ROLE_CLIENT) goto send_sync; check_seq: if (dccp_delta_seqno(dp->dccps_osr, DCCP_SKB_CB(skb)->dccpd_seq) >= 0) { send_sync: dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNC); } break; case DCCP_PKT_SYNC: dccp_send_sync(sk, DCCP_SKB_CB(skb)->dccpd_seq, DCCP_PKT_SYNCACK); /* * From RFC 4340, sec. 5.7 * * As with DCCP-Ack packets, DCCP-Sync and DCCP-SyncAck packets * MAY have non-zero-length application data areas, whose * contents receivers MUST ignore. 
*/ goto discard; } DCCP_INC_STATS(DCCP_MIB_INERRS); discard: __kfree_skb(skb); return 0; } int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, const unsigned int len) { if (dccp_check_seqno(sk, skb)) goto discard; if (dccp_parse_options(sk, NULL, skb)) return 1; dccp_handle_ackvec_processing(sk, skb); dccp_deliver_input_to_ccids(sk, skb); return __dccp_rcv_established(sk, skb, dh, len); discard: __kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(dccp_rcv_established); static int dccp_rcv_request_sent_state_process(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, const unsigned int len) { /* * Step 4: Prepare sequence numbers in REQUEST * If S.state == REQUEST, * If (P.type == Response or P.type == Reset) * and S.AWL <= P.ackno <= S.AWH, * / * Set sequence number variables corresponding to the * other endpoint, so P will pass the tests in Step 6 * / * Set S.GSR, S.ISR, S.SWL, S.SWH * / * Response processing continues in Step 10; Reset * processing continues in Step 9 * / */ if (dh->dccph_type == DCCP_PKT_RESPONSE) { const struct inet_connection_sock *icsk = inet_csk(sk); struct dccp_sock *dp = dccp_sk(sk); long tstamp = dccp_timestamp(); if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, dp->dccps_awl, dp->dccps_awh)) { dccp_pr_debug("invalid ackno: S.AWL=%llu, " "P.ackno=%llu, S.AWH=%llu\n", (unsigned long long)dp->dccps_awl, (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, (unsigned long long)dp->dccps_awh); goto out_invalid_packet; } /* * If option processing (Step 8) failed, return 1 here so that * dccp_v4_do_rcv() sends a Reset. The Reset code depends on * the option type and is set in dccp_parse_options(). */ if (dccp_parse_options(sk, NULL, skb)) return 1; /* Obtain usec RTT sample from SYN exchange (used by TFRC). */ if (likely(dp->dccps_options_received.dccpor_timestamp_echo)) dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp - dp->dccps_options_received.dccpor_timestamp_echo)); /* Stop the REQUEST timer */ inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); WARN_ON(sk->sk_send_head == NULL); kfree_skb(sk->sk_send_head); sk->sk_send_head = NULL; /* * Set ISR, GSR from packet. ISS was set in dccp_v{4,6}_connect * and GSS in dccp_transmit_skb(). Setting AWL/AWH and SWL/SWH * is done as part of activating the feature values below, since * these settings depend on the local/remote Sequence Window * features, which were undefined or not confirmed until now. */ dp->dccps_gsr = dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq; dccp_sync_mss(sk, icsk->icsk_pmtu_cookie); /* * Step 10: Process REQUEST state (second part) * If S.state == REQUEST, * / * If we get here, P is a valid Response from the * server (see Step 4), and we should move to * PARTOPEN state. PARTOPEN means send an Ack, * don't send Data packets, retransmit Acks * periodically, and always include any Init Cookie * from the Response * / * S.state := PARTOPEN * Set PARTOPEN timer * Continue with S.state == PARTOPEN * / * Step 12 will send the Ack completing the * three-way handshake * / */ dccp_set_state(sk, DCCP_PARTOPEN); /* * If feature negotiation was successful, activate features now; * an activation failure means that this host could not activate * one ore more features (e.g. insufficient memory), which would * leave at least one feature in an undefined state. */ if (dccp_feat_activate_values(sk, &dp->dccps_featneg)) goto unable_to_proceed; /* Make sure socket is routed, for correct metrics. 
*/ icsk->icsk_af_ops->rebuild_header(sk); if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); } if (sk->sk_write_pending || inet_csk_in_pingpong_mode(sk) || icsk->icsk_accept_queue.rskq_defer_accept) { /* Save one ACK. Data will be ready after * several ticks, if write_pending is set. * * It may be deleted, but with this feature tcpdumps * look so _wonderfully_ clever, that I was not able * to stand against the temptation 8) --ANK */ /* * OK, in DCCP we can as well do a similar trick, its * even in the draft, but there is no need for us to * schedule an ack here, as dccp_sendmsg does this for * us, also stated in the draft. -acme */ __kfree_skb(skb); return 0; } dccp_send_ack(sk); return -1; } out_invalid_packet: /* dccp_v4_do_rcv will send a reset */ DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR; return 1; unable_to_proceed: DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_ABORTED; /* * We mark this socket as no longer usable, so that the loop in * dccp_sendmsg() terminates and the application gets notified. */ dccp_set_state(sk, DCCP_CLOSED); sk->sk_err = ECOMM; return 1; } static int dccp_rcv_respond_partopen_state_process(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, const unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); u32 sample = dp->dccps_options_received.dccpor_timestamp_echo; int queued = 0; switch (dh->dccph_type) { case DCCP_PKT_RESET: inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); break; case DCCP_PKT_DATA: if (sk->sk_state == DCCP_RESPOND) break; fallthrough; case DCCP_PKT_DATAACK: case DCCP_PKT_ACK: /* * FIXME: we should be resetting the PARTOPEN (DELACK) timer * here but only if we haven't used the DELACK timer for * something else, like sending a delayed ack for a TIMESTAMP * echo, etc, for now were not clearing it, sending an extra * ACK when there is nothing else to do in DELACK is not a big * deal after all. */ /* Stop the PARTOPEN timer */ if (sk->sk_state == DCCP_PARTOPEN) inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); /* Obtain usec RTT sample from SYN exchange (used by TFRC). */ if (likely(sample)) { long delta = dccp_timestamp() - sample; dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta); } dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq; dccp_set_state(sk, DCCP_OPEN); if (dh->dccph_type == DCCP_PKT_DATAACK || dh->dccph_type == DCCP_PKT_DATA) { __dccp_rcv_established(sk, skb, dh, len); queued = 1; /* packet was queued (by __dccp_rcv_established) */ } break; } return queued; } int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, struct dccp_hdr *dh, unsigned int len) { struct dccp_sock *dp = dccp_sk(sk); struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); const int old_state = sk->sk_state; bool acceptable; int queued = 0; /* * Step 3: Process LISTEN state * * If S.state == LISTEN, * If P.type == Request or P contains a valid Init Cookie option, * (* Must scan the packet's options to check for Init * Cookies. Only Init Cookies are processed here, * however; other options are processed in Step 8. 
This * scan need only be performed if the endpoint uses Init * Cookies *) * (* Generate a new socket and switch to that socket *) * Set S := new socket for this port pair * S.state = RESPOND * Choose S.ISS (initial seqno) or set from Init Cookies * Initialize S.GAR := S.ISS * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init * Cookies Continue with S.state == RESPOND * (* A Response packet will be generated in Step 11 *) * Otherwise, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (sk->sk_state == DCCP_LISTEN) { if (dh->dccph_type == DCCP_PKT_REQUEST) { /* It is possible that we process SYN packets from backlog, * so we need to make sure to disable BH and RCU right there. */ rcu_read_lock(); local_bh_disable(); acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0; local_bh_enable(); rcu_read_unlock(); if (!acceptable) return 1; consume_skb(skb); return 0; } if (dh->dccph_type == DCCP_PKT_RESET) goto discard; /* Caller (dccp_v4_do_rcv) will send Reset */ dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; return 1; } else if (sk->sk_state == DCCP_CLOSED) { dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; return 1; } /* Step 6: Check sequence numbers (omitted in LISTEN/REQUEST state) */ if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb)) goto discard; /* * Step 7: Check for unexpected packet types * If (S.is_server and P.type == Response) * or (S.is_client and P.type == Request) * or (S.state == RESPOND and P.type == Data), * Send Sync packet acknowledging P.seqno * Drop packet and return */ if ((dp->dccps_role != DCCP_ROLE_CLIENT && dh->dccph_type == DCCP_PKT_RESPONSE) || (dp->dccps_role == DCCP_ROLE_CLIENT && dh->dccph_type == DCCP_PKT_REQUEST) || (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) { dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC); goto discard; } /* Step 8: Process options */ if (dccp_parse_options(sk, NULL, skb)) return 1; /* * Step 9: Process Reset * If P.type == Reset, * Tear down connection * S.state := TIMEWAIT * Set TIMEWAIT timer * Drop packet and return */ if (dh->dccph_type == DCCP_PKT_RESET) { dccp_rcv_reset(sk, skb); return 0; } else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) { /* Step 13 */ if (dccp_rcv_closereq(sk, skb)) return 0; goto discard; } else if (dh->dccph_type == DCCP_PKT_CLOSE) { /* Step 14 */ if (dccp_rcv_close(sk, skb)) return 0; goto discard; } switch (sk->sk_state) { case DCCP_REQUESTING: queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); if (queued >= 0) return queued; __kfree_skb(skb); return 0; case DCCP_PARTOPEN: /* Step 8: if using Ack Vectors, mark packet acknowledgeable */ dccp_handle_ackvec_processing(sk, skb); dccp_deliver_input_to_ccids(sk, skb); fallthrough; case DCCP_RESPOND: queued = dccp_rcv_respond_partopen_state_process(sk, skb, dh, len); break; } if (dh->dccph_type == DCCP_PKT_ACK || dh->dccph_type == DCCP_PKT_DATAACK) { switch (old_state) { case DCCP_PARTOPEN: sk->sk_state_change(sk); sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT); break; } } else if (unlikely(dh->dccph_type == DCCP_PKT_SYNC)) { dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNCACK); goto discard; } if (!queued) { discard: __kfree_skb(skb); } return 0; } EXPORT_SYMBOL_GPL(dccp_rcv_state_process); /** * dccp_sample_rtt - Validate and finalise computation of RTT sample * @sk: socket structure * @delta: number of microseconds between packet and acknowledgment * * The routine is kept generic to work in different contexts. 
It should be * called immediately when the ACK used for the RTT sample arrives. */ u32 dccp_sample_rtt(struct sock *sk, long delta) { /* dccpor_elapsed_time is either zeroed out or set and > 0 */ delta -= dccp_sk(sk)->dccps_options_received.dccpor_elapsed_time * 10; if (unlikely(delta <= 0)) { DCCP_WARN("unusable RTT sample %ld, using min\n", delta); return DCCP_SANE_RTT_MIN; } if (unlikely(delta > DCCP_SANE_RTT_MAX)) { DCCP_WARN("RTT sample %ld too large, using max\n", delta); return DCCP_SANE_RTT_MAX; } return delta; }
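dccp_sample_rtt() subtracts the peer-reported elapsed time (carried in units of 10 microseconds) and clamps the result to a sane window. The self-contained sketch below illustrates that clamping; the bounds mirror the role of DCCP_SANE_RTT_MIN/MAX but are assumed values used only for illustration.

#include <stdio.h>

#define SANE_RTT_MIN 100        /* 100 us, assumed bound */
#define SANE_RTT_MAX 3000000    /* 3 s, assumed bound */

static long sample_rtt(long delta_us, long elapsed_time_units)
{
	/* The elapsed-time option is in 10 us units, so scale before subtracting. */
	delta_us -= elapsed_time_units * 10;
	if (delta_us <= 0)
		return SANE_RTT_MIN;   /* unusable sample: clamp to minimum */
	if (delta_us > SANE_RTT_MAX)
		return SANE_RTT_MAX;   /* implausibly large: clamp to maximum */
	return delta_us;
}

int main(void)
{
	printf("%ld\n", sample_rtt(250, 5));      /* 200 */
	printf("%ld\n", sample_rtt(40, 10));      /* clamped to 100 */
	printf("%ld\n", sample_rtt(9000000, 0));  /* clamped to 3000000 */
	return 0;
}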
60 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NET_FRAG_H__ #define __NET_FRAG_H__ #include <linux/rhashtable-types.h> #include <linux/completion.h> #include <linux/in6.h> #include <linux/rbtree_types.h> #include <linux/refcount.h> #include <net/dropreason-core.h> /* Per netns frag queues directory */ struct fqdir { /* sysctls */ long high_thresh; long low_thresh; int timeout; int max_dist; struct inet_frags *f; struct net *net; bool dead; struct rhashtable rhashtable ____cacheline_aligned_in_smp; /* Keep atomic mem on separate cachelines in structs that include it */ atomic_long_t mem ____cacheline_aligned_in_smp; struct work_struct destroy_work; struct llist_node free_list; }; /** * enum: fragment queue flags * * @INET_FRAG_FIRST_IN: first fragment has arrived * @INET_FRAG_LAST_IN: final fragment has arrived * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable * @INET_FRAG_DROP: if skbs must be dropped (instead of being consumed) */ enum { INET_FRAG_FIRST_IN = BIT(0), INET_FRAG_LAST_IN = BIT(1), INET_FRAG_COMPLETE = BIT(2), INET_FRAG_HASH_DEAD = BIT(3), INET_FRAG_DROP = BIT(4), }; struct frag_v4_compare_key { __be32 saddr; __be32 daddr; u32 user; u32 vif; __be16 id; u16 protocol; }; struct frag_v6_compare_key { struct in6_addr saddr; struct in6_addr daddr; u32 user; __be32 id; u32 iif; }; /** * struct inet_frag_queue - fragment queue * * @node: rhash node * @key: keys identifying this frag. * @timer: queue expiration timer * @lock: spinlock protecting this frag * @refcnt: reference count of the queue * @rb_fragments: received fragments rb-tree root * @fragments_tail: received fragments tail * @last_run_head: the head of the last "run". 
see ip_fragment.c * @stamp: timestamp of the last received fragment * @len: total length of the original datagram * @meat: length of received fragments so far * @tstamp_type: stamp has a mono delivery time (EDT) * @flags: fragment queue flags * @max_size: maximum received fragment size * @fqdir: pointer to struct fqdir * @rcu: rcu head for freeing deferall */ struct inet_frag_queue { struct rhash_head node; union { struct frag_v4_compare_key v4; struct frag_v6_compare_key v6; } key; struct timer_list timer; spinlock_t lock; refcount_t refcnt; struct rb_root rb_fragments; struct sk_buff *fragments_tail; struct sk_buff *last_run_head; ktime_t stamp; int len; int meat; u8 tstamp_type; __u8 flags; u16 max_size; struct fqdir *fqdir; struct rcu_head rcu; }; struct inet_frags { unsigned int qsize; void (*constructor)(struct inet_frag_queue *q, const void *arg); void (*destructor)(struct inet_frag_queue *); void (*frag_expire)(struct timer_list *t); struct kmem_cache *frags_cachep; const char *frags_cache_name; struct rhashtable_params rhash_params; refcount_t refcnt; struct completion completion; }; int inet_frags_init(struct inet_frags *); void inet_frags_fini(struct inet_frags *); int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net); static inline void fqdir_pre_exit(struct fqdir *fqdir) { /* Prevent creation of new frags. * Pairs with READ_ONCE() in inet_frag_find(). */ WRITE_ONCE(fqdir->high_thresh, 0); /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire() * and ip6frag_expire_frag_queue(). */ WRITE_ONCE(fqdir->dead, true); } void fqdir_exit(struct fqdir *fqdir); void inet_frag_kill(struct inet_frag_queue *q); void inet_frag_destroy(struct inet_frag_queue *q); struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key); /* Free all skbs in the queue; return the sum of their truesizes. */ unsigned int inet_frag_rbtree_purge(struct rb_root *root, enum skb_drop_reason reason); static inline void inet_frag_put(struct inet_frag_queue *q) { if (refcount_dec_and_test(&q->refcnt)) inet_frag_destroy(q); } /* Memory Tracking Functions. */ static inline long frag_mem_limit(const struct fqdir *fqdir) { return atomic_long_read(&fqdir->mem); } static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val) { atomic_long_sub(val, &fqdir->mem); } static inline void add_frag_mem_limit(struct fqdir *fqdir, long val) { atomic_long_add(val, &fqdir->mem); } /* RFC 3168 support : * We want to check ECN values of all fragments, do detect invalid combinations. * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value. */ #define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */ #define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */ #define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */ #define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */ extern const u8 ip_frag_ecn_table[16]; /* Return values of inet_frag_queue_insert() */ #define IPFRAG_OK 0 #define IPFRAG_DUP 1 #define IPFRAG_OVERLAP 2 int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb, int offset, int end); void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, struct sk_buff *parent); void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, void *reasm_data, bool try_coalesce); struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); #endif
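frag_mem_limit(), add_frag_mem_limit() and sub_frag_mem_limit() above are thin wrappers around a per-fqdir atomic counter. The sketch below illustrates the accounting pattern a reassembly path follows against high_thresh; plain longs stand in for atomic_long_t, and every name in it is hypothetical.

#include <stdbool.h>
#include <stdio.h>

struct fake_fqdir {
	long mem;          /* bytes currently held by queued fragments */
	long high_thresh;  /* refuse new fragments above this budget   */
};

static bool accept_fragment(struct fake_fqdir *fqdir, long truesize)
{
	if (fqdir->mem > fqdir->high_thresh)
		return false;       /* over budget: drop the fragment */
	fqdir->mem += truesize;     /* add_frag_mem_limit() equivalent */
	return true;
}

static void free_fragment(struct fake_fqdir *fqdir, long truesize)
{
	fqdir->mem -= truesize;     /* sub_frag_mem_limit() equivalent */
}

int main(void)
{
	struct fake_fqdir fqdir = { .mem = 0, .high_thresh = 4 << 20 };

	printf("%d\n", accept_fragment(&fqdir, 2048));  /* 1: accepted */
	free_fragment(&fqdir, 2048);
	printf("%ld\n", fqdir.mem);                     /* 0 */
	return 0;
}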
// SPDX-License-Identifier: GPL-2.0+
/******************************************************************************
 *  cxacru.c  -  driver for USB ADSL modems based on
 *               Conexant AccessRunner chipset
 *
 *  Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
 *  Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
 *  Copyright (C) 2007 Simon Arlott
 *  Copyright (C) 2009 Simon Arlott
 ******************************************************************************/

/*
 *  Credit is due for Josep Comas, who created the original patch to speedtch.c
 *  to support the different padding used by the AccessRunner (now generalized
 *  into usbatm), and the userspace firmware loading utility.
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/mutex.h> #include <linux/unaligned.h> #include "usbatm.h" #define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott" #define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver" static const char cxacru_driver_name[] = "cxacru"; #define CXACRU_EP_CMD 0x01 /* Bulk/interrupt in/out */ #define CXACRU_EP_DATA 0x02 /* Bulk in/out */ #define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */ #define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2) /* Addresses */ #define PLLFCLK_ADDR 0x00350068 #define PLLBCLK_ADDR 0x0035006c #define SDRAMEN_ADDR 0x00350010 #define FW_ADDR 0x00801000 #define BR_ADDR 0x00180600 #define SIG_ADDR 0x00180500 #define BR_STACK_ADDR 0x00187f10 /* Values */ #define SDRAM_ENA 0x1 #define CMD_TIMEOUT 2000 /* msecs */ #define POLL_INTERVAL 1 /* secs */ /* commands for interaction with the modem through the control channel before * firmware is loaded */ enum cxacru_fw_request { FW_CMD_ERR, FW_GET_VER, FW_READ_MEM, FW_WRITE_MEM, FW_RMW_MEM, FW_CHECKSUM_MEM, FW_GOTO_MEM, }; /* commands for interaction with the modem through the control channel once * firmware is loaded */ enum cxacru_cm_request { CM_REQUEST_UNDEFINED = 0x80, CM_REQUEST_TEST, CM_REQUEST_CHIP_GET_MAC_ADDRESS, CM_REQUEST_CHIP_GET_DP_VERSIONS, CM_REQUEST_CHIP_ADSL_LINE_START, CM_REQUEST_CHIP_ADSL_LINE_STOP, CM_REQUEST_CHIP_ADSL_LINE_GET_STATUS, CM_REQUEST_CHIP_ADSL_LINE_GET_SPEED, CM_REQUEST_CARD_INFO_GET, CM_REQUEST_CARD_DATA_GET, CM_REQUEST_CARD_DATA_SET, CM_REQUEST_COMMAND_HW_IO, CM_REQUEST_INTERFACE_HW_IO, CM_REQUEST_CARD_SERIAL_DATA_PATH_GET, CM_REQUEST_CARD_SERIAL_DATA_PATH_SET, CM_REQUEST_CARD_CONTROLLER_VERSION_GET, CM_REQUEST_CARD_GET_STATUS, CM_REQUEST_CARD_GET_MAC_ADDRESS, CM_REQUEST_CARD_GET_DATA_LINK_STATUS, CM_REQUEST_MAX, }; /* commands for interaction with the flash memory * * read: response is the contents of the first 60 bytes of flash memory * write: request contains the 60 bytes of data to write to flash memory * response is the contents of the first 60 bytes of flash memory * * layout: PP PP VV VV MM MM MM MM MM MM ?? ?? 
SS SS SS SS SS SS SS SS * SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * * P: le16 USB Product ID * V: le16 USB Vendor ID * M: be48 MAC Address * S: le16 ASCII Serial Number */ enum cxacru_cm_flash { CM_FLASH_READ = 0xa1, CM_FLASH_WRITE = 0xa2 }; /* reply codes to the commands above */ enum cxacru_cm_status { CM_STATUS_UNDEFINED, CM_STATUS_SUCCESS, CM_STATUS_ERROR, CM_STATUS_UNSUPPORTED, CM_STATUS_UNIMPLEMENTED, CM_STATUS_PARAMETER_ERROR, CM_STATUS_DBG_LOOPBACK, CM_STATUS_MAX, }; /* indices into CARD_INFO_GET return array */ enum cxacru_info_idx { CXINF_DOWNSTREAM_RATE, CXINF_UPSTREAM_RATE, CXINF_LINK_STATUS, CXINF_LINE_STATUS, CXINF_MAC_ADDRESS_HIGH, CXINF_MAC_ADDRESS_LOW, CXINF_UPSTREAM_SNR_MARGIN, CXINF_DOWNSTREAM_SNR_MARGIN, CXINF_UPSTREAM_ATTENUATION, CXINF_DOWNSTREAM_ATTENUATION, CXINF_TRANSMITTER_POWER, CXINF_UPSTREAM_BITS_PER_FRAME, CXINF_DOWNSTREAM_BITS_PER_FRAME, CXINF_STARTUP_ATTEMPTS, CXINF_UPSTREAM_CRC_ERRORS, CXINF_DOWNSTREAM_CRC_ERRORS, CXINF_UPSTREAM_FEC_ERRORS, CXINF_DOWNSTREAM_FEC_ERRORS, CXINF_UPSTREAM_HEC_ERRORS, CXINF_DOWNSTREAM_HEC_ERRORS, CXINF_LINE_STARTABLE, CXINF_MODULATION, CXINF_ADSL_HEADEND, CXINF_ADSL_HEADEND_ENVIRONMENT, CXINF_CONTROLLER_VERSION, /* dunno what the missing two mean */ CXINF_MAX = 0x1c, }; enum cxacru_poll_state { CXPOLL_STOPPING, CXPOLL_STOPPED, CXPOLL_POLLING, CXPOLL_SHUTDOWN }; struct cxacru_modem_type { u32 pll_f_clk; u32 pll_b_clk; int boot_rom_patch; }; struct cxacru_data { struct usbatm_data *usbatm; const struct cxacru_modem_type *modem_type; int line_status; struct mutex adsl_state_serialize; int adsl_status; struct delayed_work poll_work; u32 card_info[CXINF_MAX]; struct mutex poll_state_serialize; enum cxacru_poll_state poll_state; /* control handles */ struct mutex cm_serialize; u8 *rcv_buf; u8 *snd_buf; struct urb *rcv_urb; struct urb *snd_urb; struct completion rcv_done; struct completion snd_done; }; static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm, u8 *wdata, int wsize, u8 *rdata, int rsize); static void cxacru_poll_status(struct work_struct *work); /* Card info exported through sysfs */ #define CXACRU__ATTR_INIT(_name) \ static DEVICE_ATTR_RO(_name) #define CXACRU_CMD_INIT(_name) \ static DEVICE_ATTR_RW(_name) #define CXACRU_SET_INIT(_name) \ static DEVICE_ATTR_WO(_name) #define CXACRU_ATTR_INIT(_value, _type, _name) \ static ssize_t _name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct cxacru_data *instance = to_usbatm_driver_data(\ to_usb_interface(dev)); \ \ if (instance == NULL) \ return -ENODEV; \ \ return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \ } \ CXACRU__ATTR_INIT(_name) #define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name) #define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) #define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) #define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf) { return sprintf(buf, "%u\n", value); } static ssize_t cxacru_sysfs_showattr_s8(s8 value, char *buf) { return sprintf(buf, "%d\n", value); } static ssize_t cxacru_sysfs_showattr_dB(s16 
value, char *buf) { if (likely(value >= 0)) { return snprintf(buf, PAGE_SIZE, "%u.%02u\n", value / 100, value % 100); } else { value = -value; return snprintf(buf, PAGE_SIZE, "-%u.%02u\n", value / 100, value % 100); } } static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf) { static char *str[] = { "no", "yes" }; if (unlikely(value >= ARRAY_SIZE(str))) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf) { static char *str[] = { NULL, "not connected", "connected", "lost" }; if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL)) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf) { static char *str[] = { "down", "attempting to activate", "training", "channel analysis", "exchange", "up", "waiting", "initialising" }; if (unlikely(value >= ARRAY_SIZE(str))) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf) { static char *str[] = { "", "ANSI T1.413", "ITU-T G.992.1 (G.DMT)", "ITU-T G.992.2 (G.LITE)" }; if (unlikely(value >= ARRAY_SIZE(str))) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } /* * This could use MAC_ADDRESS_HIGH and MAC_ADDRESS_LOW, but since * this data is already in atm_dev there's no point. * * MAC_ADDRESS_HIGH = 0x????5544 * MAC_ADDRESS_LOW = 0x33221100 * Where 00-55 are bytes 0-5 of the MAC. */ static ssize_t mac_address_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxacru_data *instance = to_usbatm_driver_data( to_usb_interface(dev)); if (instance == NULL || instance->usbatm->atm_dev == NULL) return -ENODEV; return sprintf(buf, "%pM\n", instance->usbatm->atm_dev->esi); } static ssize_t adsl_state_show(struct device *dev, struct device_attribute *attr, char *buf) { static char *str[] = { "running", "stopped" }; struct cxacru_data *instance = to_usbatm_driver_data( to_usb_interface(dev)); u32 value; if (instance == NULL) return -ENODEV; value = instance->card_info[CXINF_LINE_STARTABLE]; if (unlikely(value >= ARRAY_SIZE(str))) return sprintf(buf, "%u\n", value); return sprintf(buf, "%s\n", str[value]); } static ssize_t adsl_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cxacru_data *instance = to_usbatm_driver_data( to_usb_interface(dev)); int ret; int poll = -1; char str_cmd[8]; int len = strlen(buf); if (!capable(CAP_NET_ADMIN)) return -EACCES; ret = sscanf(buf, "%7s", str_cmd); if (ret != 1) return -EINVAL; ret = 0; if (instance == NULL) return -ENODEV; if (mutex_lock_interruptible(&instance->adsl_state_serialize)) return -ERESTARTSYS; if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) { ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0); if (ret < 0) { atm_err(instance->usbatm, "change adsl state:" " CHIP_ADSL_LINE_STOP returned %d\n", ret); ret = -EIO; } else { ret = len; poll = CXPOLL_STOPPED; } } /* Line status is only updated every second * and the device appears to only react to * START/STOP every second too. Wait 1.5s to * be sure that restart will have an effect. 
*/ if (!strcmp(str_cmd, "restart")) msleep(1500); if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) { ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0); if (ret < 0) { atm_err(instance->usbatm, "change adsl state:" " CHIP_ADSL_LINE_START returned %d\n", ret); ret = -EIO; } else { ret = len; poll = CXPOLL_POLLING; } } if (!strcmp(str_cmd, "poll")) { ret = len; poll = CXPOLL_POLLING; } if (ret == 0) { ret = -EINVAL; poll = -1; } if (poll == CXPOLL_POLLING) { mutex_lock(&instance->poll_state_serialize); switch (instance->poll_state) { case CXPOLL_STOPPED: /* start polling */ instance->poll_state = CXPOLL_POLLING; break; case CXPOLL_STOPPING: /* abort stop request */ instance->poll_state = CXPOLL_POLLING; fallthrough; case CXPOLL_POLLING: case CXPOLL_SHUTDOWN: /* don't start polling */ poll = -1; } mutex_unlock(&instance->poll_state_serialize); } else if (poll == CXPOLL_STOPPED) { mutex_lock(&instance->poll_state_serialize); /* request stop */ if (instance->poll_state == CXPOLL_POLLING) instance->poll_state = CXPOLL_STOPPING; mutex_unlock(&instance->poll_state_serialize); } mutex_unlock(&instance->adsl_state_serialize); if (poll == CXPOLL_POLLING) cxacru_poll_status(&instance->poll_work.work); return ret; } /* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */ static ssize_t adsl_config_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cxacru_data *instance = to_usbatm_driver_data( to_usb_interface(dev)); int len = strlen(buf); int ret, pos, num; __le32 data[CMD_PACKET_SIZE / 4]; if (!capable(CAP_NET_ADMIN)) return -EACCES; if (instance == NULL) return -ENODEV; pos = 0; num = 0; while (pos < len) { int tmp; u32 index; u32 value; ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp); if (ret < 2) return -EINVAL; if (index > 0x7f) return -EINVAL; if (tmp < 0 || tmp > len - pos) return -EINVAL; pos += tmp; /* skip trailing newline */ if (buf[pos] == '\n' && pos == len-1) pos++; data[num * 2 + 1] = cpu_to_le32(index); data[num * 2 + 2] = cpu_to_le32(value); num++; /* send config values when data buffer is full * or no more data */ if (pos >= len || num >= CMD_MAX_CONFIG) { char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */ data[0] = cpu_to_le32(num); ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET, (u8 *) data, 4 + num * 8, NULL, 0); if (ret < 0) { atm_err(instance->usbatm, "set card data returned %d\n", ret); return -EIO; } for (tmp = 0; tmp < num; tmp++) snprintf(log + tmp*12, 13, " %02x=%08x", le32_to_cpu(data[tmp * 2 + 1]), le32_to_cpu(data[tmp * 2 + 2])); atm_info(instance->usbatm, "config%s\n", log); num = 0; } } return len; } /* * All device attributes are included in CXACRU_ALL_FILES * so that the same list can be used multiple times: * INIT (define the device attributes) * CREATE (create all the device files) * REMOVE (remove all the device files) * * With the last two being defined as needed in the functions * they are used in before calling CXACRU_ALL_FILES() */ #define CXACRU_ALL_FILES(_action) \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_RATE, u32, downstream_rate); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_RATE, u32, upstream_rate); \ CXACRU_ATTR_##_action(CXINF_LINK_STATUS, LINK, link_status); \ CXACRU_ATTR_##_action(CXINF_LINE_STATUS, LINE, line_status); \ CXACRU__ATTR_##_action( mac_address); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_SNR_MARGIN, dB, upstream_snr_margin); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_SNR_MARGIN, dB, downstream_snr_margin); \ 
CXACRU_ATTR_##_action(CXINF_UPSTREAM_ATTENUATION, dB, upstream_attenuation); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_ATTENUATION, dB, downstream_attenuation); \ CXACRU_ATTR_##_action(CXINF_TRANSMITTER_POWER, s8, transmitter_power); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_BITS_PER_FRAME, u32, upstream_bits_per_frame); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_BITS_PER_FRAME, u32, downstream_bits_per_frame); \ CXACRU_ATTR_##_action(CXINF_STARTUP_ATTEMPTS, u32, startup_attempts); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_CRC_ERRORS, u32, upstream_crc_errors); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_CRC_ERRORS, u32, downstream_crc_errors); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_FEC_ERRORS, u32, upstream_fec_errors); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_FEC_ERRORS, u32, downstream_fec_errors); \ CXACRU_ATTR_##_action(CXINF_UPSTREAM_HEC_ERRORS, u32, upstream_hec_errors); \ CXACRU_ATTR_##_action(CXINF_DOWNSTREAM_HEC_ERRORS, u32, downstream_hec_errors); \ CXACRU_ATTR_##_action(CXINF_LINE_STARTABLE, bool, line_startable); \ CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \ CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \ CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \ CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \ CXACRU_CMD_##_action( adsl_state); \ CXACRU_SET_##_action( adsl_config); CXACRU_ALL_FILES(INIT); static struct attribute *cxacru_attrs[] = { &dev_attr_adsl_config.attr, &dev_attr_adsl_state.attr, &dev_attr_adsl_controller_version.attr, &dev_attr_adsl_headend_environment.attr, &dev_attr_adsl_headend.attr, &dev_attr_modulation.attr, &dev_attr_line_startable.attr, &dev_attr_downstream_hec_errors.attr, &dev_attr_upstream_hec_errors.attr, &dev_attr_downstream_fec_errors.attr, &dev_attr_upstream_fec_errors.attr, &dev_attr_downstream_crc_errors.attr, &dev_attr_upstream_crc_errors.attr, &dev_attr_startup_attempts.attr, &dev_attr_downstream_bits_per_frame.attr, &dev_attr_upstream_bits_per_frame.attr, &dev_attr_transmitter_power.attr, &dev_attr_downstream_attenuation.attr, &dev_attr_upstream_attenuation.attr, &dev_attr_downstream_snr_margin.attr, &dev_attr_upstream_snr_margin.attr, &dev_attr_mac_address.attr, &dev_attr_line_status.attr, &dev_attr_link_status.attr, &dev_attr_upstream_rate.attr, &dev_attr_downstream_rate.attr, NULL, }; ATTRIBUTE_GROUPS(cxacru); /* the following three functions are stolen from drivers/usb/core/message.c */ static void cxacru_blocking_completion(struct urb *urb) { complete(urb->context); } struct cxacru_timer { struct timer_list timer; struct urb *urb; }; static void cxacru_timeout_kill(struct timer_list *t) { struct cxacru_timer *timer = from_timer(timer, t, timer); usb_unlink_urb(timer->urb); } static int cxacru_start_wait_urb(struct urb *urb, struct completion *done, int *actual_length) { struct cxacru_timer timer = { .urb = urb, }; timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0); mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT)); wait_for_completion(done); del_timer_sync(&timer.timer); destroy_timer_on_stack(&timer.timer); if (actual_length) *actual_length = urb->actual_length; return urb->status; /* must read status after completion */ } static int cxacru_cm(struct cxacru_data *instance, enum cxacru_cm_request cm, u8 *wdata, int wsize, u8 *rdata, int rsize) { int ret, actlen; int offb, offd; const int stride = CMD_PACKET_SIZE - 4; u8 *wbuf = instance->snd_buf; u8 *rbuf = instance->rcv_buf; int wbuflen = ((wsize - 1) / stride + 1) * 
CMD_PACKET_SIZE; int rbuflen = ((rsize - 1) / stride + 1) * CMD_PACKET_SIZE; if (wbuflen > PAGE_SIZE || rbuflen > PAGE_SIZE) { if (printk_ratelimit()) usb_err(instance->usbatm, "requested transfer size too large (%d, %d)\n", wbuflen, rbuflen); ret = -ENOMEM; goto err; } mutex_lock(&instance->cm_serialize); /* submit reading urb before the writing one */ init_completion(&instance->rcv_done); ret = usb_submit_urb(instance->rcv_urb, GFP_KERNEL); if (ret < 0) { if (printk_ratelimit()) usb_err(instance->usbatm, "submit of read urb for cm %#x failed (%d)\n", cm, ret); goto fail; } memset(wbuf, 0, wbuflen); /* handle wsize == 0 */ wbuf[0] = cm; for (offb = offd = 0; offd < wsize; offd += stride, offb += CMD_PACKET_SIZE) { wbuf[offb] = cm; memcpy(wbuf + offb + 4, wdata + offd, min_t(int, stride, wsize - offd)); } instance->snd_urb->transfer_buffer_length = wbuflen; init_completion(&instance->snd_done); ret = usb_submit_urb(instance->snd_urb, GFP_KERNEL); if (ret < 0) { if (printk_ratelimit()) usb_err(instance->usbatm, "submit of write urb for cm %#x failed (%d)\n", cm, ret); goto fail; } ret = cxacru_start_wait_urb(instance->snd_urb, &instance->snd_done, NULL); if (ret < 0) { if (printk_ratelimit()) usb_err(instance->usbatm, "send of cm %#x failed (%d)\n", cm, ret); goto fail; } ret = cxacru_start_wait_urb(instance->rcv_urb, &instance->rcv_done, &actlen); if (ret < 0) { if (printk_ratelimit()) usb_err(instance->usbatm, "receive of cm %#x failed (%d)\n", cm, ret); goto fail; } if (actlen % CMD_PACKET_SIZE || !actlen) { if (printk_ratelimit()) usb_err(instance->usbatm, "invalid response length to cm %#x: %d\n", cm, actlen); ret = -EIO; goto fail; } /* check the return status and copy the data to the output buffer, if needed */ for (offb = offd = 0; offd < rsize && offb < actlen; offb += CMD_PACKET_SIZE) { if (rbuf[offb] != cm) { if (printk_ratelimit()) usb_err(instance->usbatm, "wrong cm %#x in response to cm %#x\n", rbuf[offb], cm); ret = -EIO; goto fail; } if (rbuf[offb + 1] != CM_STATUS_SUCCESS) { if (printk_ratelimit()) usb_err(instance->usbatm, "response to cm %#x failed: %#x\n", cm, rbuf[offb + 1]); ret = -EIO; goto fail; } if (offd >= rsize) break; memcpy(rdata + offd, rbuf + offb + 4, min_t(int, stride, rsize - offd)); offd += stride; } ret = offd; usb_dbg(instance->usbatm, "cm %#x\n", cm); fail: mutex_unlock(&instance->cm_serialize); err: return ret; } static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_request cm, u32 *data, int size) { int ret, len; __le32 *buf; int offb; unsigned int offd; const int stride = CMD_PACKET_SIZE / (4 * 2) - 1; int buflen = ((size - 1) / stride + 1 + size * 2) * 4; buf = kmalloc(buflen, GFP_KERNEL); if (!buf) return -ENOMEM; ret = cxacru_cm(instance, cm, NULL, 0, (u8 *) buf, buflen); if (ret < 0) goto cleanup; /* len > 0 && len % 4 == 0 guaranteed by cxacru_cm() */ len = ret / 4; for (offb = 0; offb < len; ) { int l = le32_to_cpu(buf[offb++]); if (l < 0 || l > stride || l > (len - offb) / 2) { if (printk_ratelimit()) usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n", cm, l); ret = -EIO; goto cleanup; } while (l--) { offd = le32_to_cpu(buf[offb++]); if (offd >= size) { if (printk_ratelimit()) usb_err(instance->usbatm, "wrong index %#x in response to cm %#x\n", offd, cm); ret = -EIO; goto cleanup; } data[offd] = le32_to_cpu(buf[offb++]); } } ret = 0; cleanup: kfree(buf); return ret; } static int cxacru_card_status(struct cxacru_data *instance) { int ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0); 
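	/* An empty CARD_GET_STATUS doubles as a firmware probe:
	 * cxacru_bind() uses this result to decide whether the heavy_init
	 * firmware upload can be skipped (UDSL_SKIP_HEAVY_INIT).
	 */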
if (ret < 0) { /* firmware not loaded */ usb_dbg(instance->usbatm, "cxacru_adsl_start: CARD_GET_STATUS returned %d\n", ret); return ret; } return 0; } static int cxacru_atm_start(struct usbatm_data *usbatm_instance, struct atm_dev *atm_dev) { struct cxacru_data *instance = usbatm_instance->driver_data; struct usb_interface *intf = usbatm_instance->usb_intf; int ret; int start_polling = 1; dev_dbg(&intf->dev, "%s\n", __func__); /* Read MAC address */ ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_MAC_ADDRESS, NULL, 0, atm_dev->esi, sizeof(atm_dev->esi)); if (ret < 0) { atm_err(usbatm_instance, "cxacru_atm_start: CARD_GET_MAC_ADDRESS returned %d\n", ret); return ret; } /* start ADSL */ mutex_lock(&instance->adsl_state_serialize); ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0); if (ret < 0) atm_err(usbatm_instance, "cxacru_atm_start: CHIP_ADSL_LINE_START returned %d\n", ret); /* Start status polling */ mutex_lock(&instance->poll_state_serialize); switch (instance->poll_state) { case CXPOLL_STOPPED: /* start polling */ instance->poll_state = CXPOLL_POLLING; break; case CXPOLL_STOPPING: /* abort stop request */ instance->poll_state = CXPOLL_POLLING; fallthrough; case CXPOLL_POLLING: case CXPOLL_SHUTDOWN: /* don't start polling */ start_polling = 0; } mutex_unlock(&instance->poll_state_serialize); mutex_unlock(&instance->adsl_state_serialize); if (start_polling) cxacru_poll_status(&instance->poll_work.work); return 0; } static void cxacru_poll_status(struct work_struct *work) { struct cxacru_data *instance = container_of(work, struct cxacru_data, poll_work.work); u32 buf[CXINF_MAX] = {}; struct usbatm_data *usbatm = instance->usbatm; struct atm_dev *atm_dev = usbatm->atm_dev; int keep_polling = 1; int ret; ret = cxacru_cm_get_array(instance, CM_REQUEST_CARD_INFO_GET, buf, CXINF_MAX); if (ret < 0) { if (ret != -ESHUTDOWN) atm_warn(usbatm, "poll status: error %d\n", ret); mutex_lock(&instance->poll_state_serialize); if (instance->poll_state != CXPOLL_SHUTDOWN) { instance->poll_state = CXPOLL_STOPPED; if (ret != -ESHUTDOWN) atm_warn(usbatm, "polling disabled, set adsl_state" " to 'start' or 'poll' to resume\n"); } mutex_unlock(&instance->poll_state_serialize); goto reschedule; } memcpy(instance->card_info, buf, sizeof(instance->card_info)); if (instance->adsl_status != buf[CXINF_LINE_STARTABLE]) { instance->adsl_status = buf[CXINF_LINE_STARTABLE]; switch (instance->adsl_status) { case 0: atm_info(usbatm, "ADSL state: running\n"); break; case 1: atm_info(usbatm, "ADSL state: stopped\n"); break; default: atm_info(usbatm, "Unknown adsl status %02x\n", instance->adsl_status); break; } } if (instance->line_status == buf[CXINF_LINE_STATUS]) goto reschedule; instance->line_status = buf[CXINF_LINE_STATUS]; switch (instance->line_status) { case 0: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: down\n"); break; case 1: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: attempting to activate\n"); break; case 2: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: training\n"); break; case 3: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: channel analysis\n"); break; case 4: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: exchange\n"); break; case 5: atm_dev->link_rate = buf[CXINF_DOWNSTREAM_RATE] * 1000 / 424; atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND); atm_info(usbatm, "ADSL line: up (%d kb/s down | %d kb/s up)\n", 
buf[CXINF_DOWNSTREAM_RATE], buf[CXINF_UPSTREAM_RATE]); break; case 6: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: waiting\n"); break; case 7: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST); atm_info(usbatm, "ADSL line: initializing\n"); break; default: atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN); atm_info(usbatm, "Unknown line state %02x\n", instance->line_status); break; } reschedule: mutex_lock(&instance->poll_state_serialize); if (instance->poll_state == CXPOLL_STOPPING && instance->adsl_status == 1 && /* stopped */ instance->line_status == 0) /* down */ instance->poll_state = CXPOLL_STOPPED; if (instance->poll_state == CXPOLL_STOPPED) keep_polling = 0; mutex_unlock(&instance->poll_state_serialize); if (keep_polling) schedule_delayed_work(&instance->poll_work, round_jiffies_relative(POLL_INTERVAL*HZ)); } static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw, u8 code1, u8 code2, u32 addr, const u8 *data, int size) { int ret; u8 *buf; int offd, offb; const int stride = CMD_PACKET_SIZE - 8; buf = (u8 *) __get_free_page(GFP_KERNEL); if (!buf) return -ENOMEM; offb = offd = 0; do { int l = min_t(int, stride, size - offd); buf[offb++] = fw; buf[offb++] = l; buf[offb++] = code1; buf[offb++] = code2; put_unaligned(cpu_to_le32(addr), (__le32 *)(buf + offb)); offb += 4; addr += l; if (l) memcpy(buf + offb, data + offd, l); if (l < stride) memset(buf + offb + l, 0, stride - l); offb += stride; offd += stride; if ((offb >= PAGE_SIZE) || (offd >= size)) { ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD), buf, offb, NULL, CMD_TIMEOUT); if (ret < 0) { dev_dbg(&usb_dev->dev, "sending fw %#x failed\n", fw); goto cleanup; } offb = 0; } } while (offd < size); dev_dbg(&usb_dev->dev, "sent fw %#x\n", fw); ret = 0; cleanup: free_page((unsigned long) buf); return ret; } static void cxacru_upload_firmware(struct cxacru_data *instance, const struct firmware *fw, const struct firmware *bp) { int ret; struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; __le16 signature[] = { usb_dev->descriptor.idVendor, usb_dev->descriptor.idProduct }; __le32 val; usb_dbg(usbatm, "%s\n", __func__); /* FirmwarePllFClkValue */ val = cpu_to_le32(instance->modem_type->pll_f_clk); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLFCLK_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "FirmwarePllFClkValue failed: %d\n", ret); return; } /* FirmwarePllBClkValue */ val = cpu_to_le32(instance->modem_type->pll_b_clk); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, PLLBCLK_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "FirmwarePllBClkValue failed: %d\n", ret); return; } /* Enable SDRAM */ val = cpu_to_le32(SDRAM_ENA); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SDRAMEN_ADDR, (u8 *) &val, 4); if (ret) { usb_err(usbatm, "Enable SDRAM failed: %d\n", ret); return; } /* Firmware */ usb_info(usbatm, "loading firmware\n"); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size); if (ret) { usb_err(usbatm, "Firmware upload failed: %d\n", ret); return; } /* Boot ROM patch */ if (instance->modem_type->boot_rom_patch) { usb_info(usbatm, "loading boot ROM patch\n"); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size); if (ret) { usb_err(usbatm, "Boot ROM patching failed: %d\n", ret); return; } } /* Signature */ ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, SIG_ADDR, (u8 *) signature, 4); if (ret) { usb_err(usbatm, "Signature storing failed: %d\n", ret); return; } 
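	/* Hand control to the uploaded image: devices needing a boot ROM
	 * patch are started by writing the patch address into the boot stack
	 * (BR_STACK_ADDR), all others by an explicit FW_GOTO_MEM to FW_ADDR.
	 */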
usb_info(usbatm, "starting device\n"); if (instance->modem_type->boot_rom_patch) { val = cpu_to_le32(BR_ADDR); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4); } else { ret = cxacru_fw(usb_dev, FW_GOTO_MEM, 0x0, 0x0, FW_ADDR, NULL, 0); } if (ret) { usb_err(usbatm, "Passing control to firmware failed: %d\n", ret); return; } /* Delay to allow firmware to start up. */ msleep_interruptible(1000); usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD)); usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD)); usb_clear_halt(usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_DATA)); usb_clear_halt(usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_DATA)); ret = cxacru_cm(instance, CM_REQUEST_CARD_GET_STATUS, NULL, 0, NULL, 0); if (ret < 0) { usb_err(usbatm, "modem failed to initialize: %d\n", ret); return; } } static int cxacru_find_firmware(struct cxacru_data *instance, char *phase, const struct firmware **fw_p) { struct usbatm_data *usbatm = instance->usbatm; struct device *dev = &usbatm->usb_intf->dev; char buf[16]; sprintf(buf, "cxacru-%s.bin", phase); usb_dbg(usbatm, "cxacru_find_firmware: looking for %s\n", buf); if (request_firmware(fw_p, buf, dev)) { usb_dbg(usbatm, "no stage %s firmware found\n", phase); return -ENOENT; } usb_info(usbatm, "found firmware %s\n", buf); return 0; } static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, struct usb_interface *usb_intf) { const struct firmware *fw, *bp; struct cxacru_data *instance = usbatm_instance->driver_data; int ret = cxacru_find_firmware(instance, "fw", &fw); if (ret) { usb_warn(usbatm_instance, "firmware (cxacru-fw.bin) unavailable (system misconfigured?)\n"); return ret; } if (instance->modem_type->boot_rom_patch) { ret = cxacru_find_firmware(instance, "bp", &bp); if (ret) { usb_warn(usbatm_instance, "boot ROM patch (cxacru-bp.bin) unavailable (system misconfigured?)\n"); release_firmware(fw); return ret; } } cxacru_upload_firmware(instance, fw, bp); if (instance->modem_type->boot_rom_patch) release_firmware(bp); release_firmware(fw); ret = cxacru_card_status(instance); if (ret) usb_dbg(usbatm_instance, "modem initialisation failed\n"); else usb_dbg(usbatm_instance, "done setting up the modem\n"); return ret; } static int cxacru_bind(struct usbatm_data *usbatm_instance, struct usb_interface *intf, const struct usb_device_id *id) { struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; struct usb_endpoint_descriptor *in, *out; int ret; /* instance init */ instance = kzalloc(sizeof(*instance), GFP_KERNEL); if (!instance) return -ENOMEM; instance->usbatm = usbatm_instance; instance->modem_type = (struct cxacru_modem_type *) id->driver_info; mutex_init(&instance->poll_state_serialize); instance->poll_state = CXPOLL_STOPPED; instance->line_status = -1; instance->adsl_status = -1; mutex_init(&instance->adsl_state_serialize); instance->rcv_buf = (u8 *) __get_free_page(GFP_KERNEL); if (!instance->rcv_buf) { usb_dbg(usbatm_instance, "cxacru_bind: no memory for rcv_buf\n"); ret = -ENOMEM; goto fail; } instance->snd_buf = (u8 *) __get_free_page(GFP_KERNEL); if (!instance->snd_buf) { usb_dbg(usbatm_instance, "cxacru_bind: no memory for snd_buf\n"); ret = -ENOMEM; goto fail; } instance->rcv_urb = usb_alloc_urb(0, GFP_KERNEL); if (!instance->rcv_urb) { ret = -ENOMEM; goto fail; } instance->snd_urb = usb_alloc_urb(0, GFP_KERNEL); if (!instance->snd_urb) { ret = -ENOMEM; goto fail; } if (!cmd_ep) { 
usb_dbg(usbatm_instance, "cxacru_bind: no command endpoint\n"); ret = -ENODEV; goto fail; } if (usb_endpoint_xfer_int(&cmd_ep->desc)) ret = usb_find_common_endpoints(intf->cur_altsetting, NULL, NULL, &in, &out); else ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL, NULL); if (ret) { usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n"); ret = -ENODEV; goto fail; } if ((cmd_ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT) { usb_fill_int_urb(instance->rcv_urb, usb_dev, usb_rcvintpipe(usb_dev, CXACRU_EP_CMD), instance->rcv_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->rcv_done, 1); usb_fill_int_urb(instance->snd_urb, usb_dev, usb_sndintpipe(usb_dev, CXACRU_EP_CMD), instance->snd_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->snd_done, 4); } else { usb_fill_bulk_urb(instance->rcv_urb, usb_dev, usb_rcvbulkpipe(usb_dev, CXACRU_EP_CMD), instance->rcv_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->rcv_done); usb_fill_bulk_urb(instance->snd_urb, usb_dev, usb_sndbulkpipe(usb_dev, CXACRU_EP_CMD), instance->snd_buf, PAGE_SIZE, cxacru_blocking_completion, &instance->snd_done); } mutex_init(&instance->cm_serialize); INIT_DELAYED_WORK(&instance->poll_work, cxacru_poll_status); usbatm_instance->driver_data = instance; usbatm_instance->flags = (cxacru_card_status(instance) ? 0 : UDSL_SKIP_HEAVY_INIT); return 0; fail: free_page((unsigned long) instance->snd_buf); free_page((unsigned long) instance->rcv_buf); usb_free_urb(instance->snd_urb); usb_free_urb(instance->rcv_urb); kfree(instance); return ret; } static void cxacru_unbind(struct usbatm_data *usbatm_instance, struct usb_interface *intf) { struct cxacru_data *instance = usbatm_instance->driver_data; int is_polling = 1; usb_dbg(usbatm_instance, "cxacru_unbind entered\n"); if (!instance) { usb_dbg(usbatm_instance, "cxacru_unbind: NULL instance!\n"); return; } mutex_lock(&instance->poll_state_serialize); BUG_ON(instance->poll_state == CXPOLL_SHUTDOWN); /* ensure that status polling continues unless * it has already stopped */ if (instance->poll_state == CXPOLL_STOPPED) is_polling = 0; /* stop polling from being stopped or started */ instance->poll_state = CXPOLL_SHUTDOWN; mutex_unlock(&instance->poll_state_serialize); if (is_polling) cancel_delayed_work_sync(&instance->poll_work); usb_kill_urb(instance->snd_urb); usb_kill_urb(instance->rcv_urb); usb_free_urb(instance->snd_urb); usb_free_urb(instance->rcv_urb); free_page((unsigned long) instance->snd_buf); free_page((unsigned long) instance->rcv_buf); kfree(instance); usbatm_instance->driver_data = NULL; } static const struct cxacru_modem_type cxacru_cafe = { .pll_f_clk = 0x02d874df, .pll_b_clk = 0x0196a51a, .boot_rom_patch = 1, }; static const struct cxacru_modem_type cxacru_cb00 = { .pll_f_clk = 0x5, .pll_b_clk = 0x3, .boot_rom_patch = 0, }; static const struct usb_device_id cxacru_usb_ids[] = { { /* V = Conexant P = ADSL modem (Euphrates project) */ USB_DEVICE(0x0572, 0xcafe), .driver_info = (unsigned long) &cxacru_cafe }, { /* V = Conexant P = ADSL modem (Hasbani project) */ USB_DEVICE(0x0572, 0xcb00), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Conexant P = ADSL modem */ USB_DEVICE(0x0572, 0xcb01), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Conexant P = ADSL modem (Well PTI-800) */ USB_DEVICE(0x0572, 0xcb02), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Conexant P = ADSL modem */ USB_DEVICE(0x0572, 0xcb06), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = 
Conexant P = ADSL modem (ZTE ZXDSL 852) */ USB_DEVICE(0x0572, 0xcb07), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Olitec P = ADSL modem version 2 */ USB_DEVICE(0x08e3, 0x0100), .driver_info = (unsigned long) &cxacru_cafe }, { /* V = Olitec P = ADSL modem version 3 */ USB_DEVICE(0x08e3, 0x0102), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Trust/Amigo Technology Co. P = AMX-CA86U */ USB_DEVICE(0x0eb0, 0x3457), .driver_info = (unsigned long) &cxacru_cafe }, { /* V = Zoom P = 5510 */ USB_DEVICE(0x1803, 0x5510), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Draytek P = Vigor 318 */ USB_DEVICE(0x0675, 0x0200), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Zyxel P = 630-C1 aka OMNI ADSL USB (Annex A) */ USB_DEVICE(0x0586, 0x330a), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Zyxel P = 630-C3 aka OMNI ADSL USB (Annex B) */ USB_DEVICE(0x0586, 0x330b), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Aethra P = Starmodem UM1020 */ USB_DEVICE(0x0659, 0x0020), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Aztech Systems P = ? AKA Pirelli AUA-010 */ USB_DEVICE(0x0509, 0x0812), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Netopia P = Cayman 3341(Annex A)/3351(Annex B) */ USB_DEVICE(0x100d, 0xcb01), .driver_info = (unsigned long) &cxacru_cb00 }, { /* V = Netopia P = Cayman 3342(Annex A)/3352(Annex B) */ USB_DEVICE(0x100d, 0x3342), .driver_info = (unsigned long) &cxacru_cb00 }, {} }; MODULE_DEVICE_TABLE(usb, cxacru_usb_ids); static struct usbatm_driver cxacru_driver = { .driver_name = cxacru_driver_name, .bind = cxacru_bind, .heavy_init = cxacru_heavy_init, .unbind = cxacru_unbind, .atm_start = cxacru_atm_start, .bulk_in = CXACRU_EP_DATA, .bulk_out = CXACRU_EP_DATA, .rx_padding = 3, .tx_padding = 11, }; static int cxacru_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(intf); char buf[15]; /* Avoid ADSL routers (cx82310_eth). * Abort if bDeviceClass is 0xff and iProduct is "USB NET CARD". */ if (usb_dev->descriptor.bDeviceClass == USB_CLASS_VENDOR_SPEC && usb_string(usb_dev, usb_dev->descriptor.iProduct, buf, sizeof(buf)) > 0) { if (!strcmp(buf, "USB NET CARD")) { dev_info(&intf->dev, "ignoring cx82310_eth device\n"); return -ENODEV; } } return usbatm_usb_probe(intf, id, &cxacru_driver); } static struct usb_driver cxacru_usb_driver = { .name = cxacru_driver_name, .probe = cxacru_usb_probe, .disconnect = usbatm_usb_disconnect, .id_table = cxacru_usb_ids, .dev_groups = cxacru_groups, }; module_usb_driver(cxacru_usb_driver); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
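/*
 * Illustrative sketch, not part of the driver: the CM_REQUEST_CARD_DATA_SET
 * payload that adsl_config_store() assembles above.  Word 0 holds the number
 * of entries, followed by little-endian index/value pairs, at most
 * CMD_MAX_CONFIG pairs per command packet.  "example_pack_config" is a
 * hypothetical helper shown only to make that layout explicit.
 */
static int example_pack_config(__le32 *data, const u32 *index,
			       const u32 *value, int num)
{
	int i;

	if (num < 1 || num > CMD_MAX_CONFIG)	/* (64 / 4 - 1) / 2 = 7 pairs */
		return -EINVAL;

	data[0] = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		data[i * 2 + 1] = cpu_to_le32(index[i]);
		data[i * 2 + 2] = cpu_to_le32(value[i]);
	}

	/* Number of payload bytes to pass to cxacru_cm(..., CARD_DATA_SET). */
	return 4 + num * 8;
}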
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*-
 * INET		802.1Q VLAN
 *		Ethernet-type device handling.
 *
 * Authors:	Ben Greear <greearb@candelatech.com>
 *              Please send support related email to: netdev@vger.kernel.org
 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
 *
 * Fixes:       Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
 *                - reset skb->pkt_type on incoming packets when MAC was changed
 *                - see that changed MAC is saddr for outgoing packets
 *              Oct 20, 2001:  Ard van Breeman:
 *                - Fix MC-list, finally.
 *                - Flush MC-list on VLAN destroy.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <net/arp.h>
#include <net/macsec.h>

#include "vlan.h"
#include "vlanproc.h"
#include <linux/if_vlan.h>
#include <linux/netpoll.h>

/*
 *	Create the VLAN header for an arbitrary protocol layer
 *
 *	saddr=NULL	means use device source address
 *	daddr=NULL	means leave destination address (eg unresolved arp)
 *
 *	This is called when the SKB is moving down the stack towards the
 *	physical devices.
 */
static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				unsigned short type,
				const void *daddr, const void *saddr,
				unsigned int len)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct vlan_hdr *vhdr;
	unsigned int vhdrlen = 0;
	u16 vlan_tci = 0;
	int rc;

	if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
		vhdr = skb_push(skb, VLAN_HLEN);

		vlan_tci = vlan->vlan_id;
		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
		vhdr->h_vlan_TCI = htons(vlan_tci);

		/*
		 *  Set the protocol type. For a packet of type ETH_P_802_3/2 we
		 *  put the length in here instead.
*/ if (type != ETH_P_802_3 && type != ETH_P_802_2) vhdr->h_vlan_encapsulated_proto = htons(type); else vhdr->h_vlan_encapsulated_proto = htons(len); skb->protocol = vlan->vlan_proto; type = ntohs(vlan->vlan_proto); vhdrlen = VLAN_HLEN; } /* Before delegating work to the lower layer, enter our MAC-address */ if (saddr == NULL) saddr = dev->dev_addr; /* Now make the underlying real hard header */ dev = vlan->real_dev; rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen); if (rc > 0) rc += vhdrlen; return rc; } static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb) { #ifdef CONFIG_NET_POLL_CONTROLLER return netpoll_send_skb(vlan->netpoll, skb); #else BUG(); return NETDEV_TX_OK; #endif } static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); unsigned int len; int ret; /* Handle non-VLAN frames if they are sent to us, for example by DHCP. * * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... */ if (vlan->flags & VLAN_FLAG_REORDER_HDR || veth->h_vlan_proto != vlan->vlan_proto) { u16 vlan_tci; vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci); } skb->dev = vlan->real_dev; len = skb->len; if (unlikely(netpoll_tx_running(dev))) return vlan_netpoll_send_skb(vlan, skb); ret = dev_queue_xmit(skb); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *stats; stats = this_cpu_ptr(vlan->vlan_pcpu_stats); u64_stats_update_begin(&stats->syncp); u64_stats_inc(&stats->tx_packets); u64_stats_add(&stats->tx_bytes, len); u64_stats_update_end(&stats->syncp); } else { this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped); } return ret; } static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; unsigned int max_mtu = real_dev->mtu; if (netif_reduces_vlan_mtu(real_dev)) max_mtu -= VLAN_HLEN; if (max_mtu < new_mtu) return -ERANGE; WRITE_ONCE(dev->mtu, new_mtu); return 0; } void vlan_dev_set_ingress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio) vlan->nr_ingress_mappings--; else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio) vlan->nr_ingress_mappings++; vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio; } int vlan_dev_set_egress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_priority_tci_mapping *mp = NULL; struct vlan_priority_tci_mapping *np; u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK; /* See if a priority mapping exists.. */ mp = vlan->egress_priority_map[skb_prio & 0xF]; while (mp) { if (mp->priority == skb_prio) { if (mp->vlan_qos && !vlan_qos) vlan->nr_egress_mappings--; else if (!mp->vlan_qos && vlan_qos) vlan->nr_egress_mappings++; mp->vlan_qos = vlan_qos; return 0; } mp = mp->next; } /* Create a new mapping then. 
*/ mp = vlan->egress_priority_map[skb_prio & 0xF]; np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL); if (!np) return -ENOBUFS; np->next = mp; np->priority = skb_prio; np->vlan_qos = vlan_qos; /* Before inserting this element in hash table, make sure all its fields * are committed to memory. * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask() */ smp_wmb(); vlan->egress_priority_map[skb_prio & 0xF] = np; if (vlan_qos) vlan->nr_egress_mappings++; return 0; } /* Flags are defined in the vlan_flags enum in * include/uapi/linux/if_vlan.h file. */ int vlan_dev_change_flags(const struct net_device *dev, u32 flags, u32 mask) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); u32 old_flags = vlan->flags; if (mask & ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP | VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP | VLAN_FLAG_BRIDGE_BINDING)) return -EINVAL; vlan->flags = (old_flags & ~mask) | (flags & mask); if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_GVRP) { if (vlan->flags & VLAN_FLAG_GVRP) vlan_gvrp_request_join(dev); else vlan_gvrp_request_leave(dev); } if (netif_running(dev) && (vlan->flags ^ old_flags) & VLAN_FLAG_MVRP) { if (vlan->flags & VLAN_FLAG_MVRP) vlan_mvrp_request_join(dev); else vlan_mvrp_request_leave(dev); } return 0; } void vlan_dev_get_realdev_name(const struct net_device *dev, char *result, size_t size) { strscpy_pad(result, vlan_dev_priv(dev)->real_dev->name, size); } bool vlan_dev_inherit_address(struct net_device *dev, struct net_device *real_dev) { if (dev->addr_assign_type != NET_ADDR_STOLEN) return false; eth_hw_addr_set(dev, real_dev->dev_addr); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return true; } static int vlan_dev_open(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; int err; if (!(real_dev->flags & IFF_UP) && !(vlan->flags & VLAN_FLAG_LOOSE_BINDING)) return -ENETDOWN; if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) && !vlan_dev_inherit_address(dev, real_dev)) { err = dev_uc_add(real_dev, dev->dev_addr); if (err < 0) goto out; } if (dev->flags & IFF_ALLMULTI) { err = dev_set_allmulti(real_dev, 1); if (err < 0) goto del_unicast; } if (dev->flags & IFF_PROMISC) { err = dev_set_promiscuity(real_dev, 1); if (err < 0) goto clear_allmulti; } ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr); if (vlan->flags & VLAN_FLAG_GVRP) vlan_gvrp_request_join(dev); if (vlan->flags & VLAN_FLAG_MVRP) vlan_mvrp_request_join(dev); if (netif_carrier_ok(real_dev) && !(vlan->flags & VLAN_FLAG_BRIDGE_BINDING)) netif_carrier_on(dev); return 0; clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); del_unicast: if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: netif_carrier_off(dev); return err; } static int vlan_dev_stop(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); if (dev->flags & IFF_PROMISC) dev_set_promiscuity(real_dev, -1); if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING)) netif_carrier_off(dev); return 0; } static int vlan_dev_set_mac_address(struct net_device *dev, void *p) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; struct sockaddr *addr = p; int err; if 
(!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (!(dev->flags & IFF_UP)) goto out; if (!ether_addr_equal(addr->sa_data, real_dev->dev_addr)) { err = dev_uc_add(real_dev, addr->sa_data); if (err < 0) return err; } if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); out: eth_hw_addr_set(dev, addr->sa_data); return 0; } static int vlan_hwtstamp_get(struct net_device *dev, struct kernel_hwtstamp_config *cfg) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; return generic_hwtstamp_get_lower(real_dev, cfg); } static int vlan_hwtstamp_set(struct net_device *dev, struct kernel_hwtstamp_config *cfg, struct netlink_ext_ack *extack) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; if (!net_eq(dev_net(dev), dev_net(real_dev))) return -EOPNOTSUPP; return generic_hwtstamp_set_lower(real_dev, cfg, extack); } static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; struct ifreq ifrr; int err = -EOPNOTSUPP; strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ); ifrr.ifr_ifru = ifr->ifr_ifru; switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) err = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd); break; } if (!err) ifr->ifr_ifru = ifrr.ifr_ifru; return err; } static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int err = 0; if (netif_device_present(real_dev) && ops->ndo_neigh_setup) err = ops->ndo_neigh_setup(real_dev, pa); return err; } #if IS_ENABLED(CONFIG_FCOE) static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid, struct scatterlist *sgl, unsigned int sgc) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = 0; if (ops->ndo_fcoe_ddp_setup) rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc); return rc; } static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int len = 0; if (ops->ndo_fcoe_ddp_done) len = ops->ndo_fcoe_ddp_done(real_dev, xid); return len; } static int vlan_dev_fcoe_enable(struct net_device *dev) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; if (ops->ndo_fcoe_enable) rc = ops->ndo_fcoe_enable(real_dev); return rc; } static int vlan_dev_fcoe_disable(struct net_device *dev) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; if (ops->ndo_fcoe_disable) rc = ops->ndo_fcoe_disable(real_dev); return rc; } static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, struct scatterlist *sgl, unsigned int sgc) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = 0; if (ops->ndo_fcoe_ddp_target) rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc); return rc; } #endif #ifdef NETDEV_FCOE_WWNN static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = 
-EINVAL; if (ops->ndo_fcoe_get_wwn) rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); return rc; } #endif static void vlan_dev_change_rx_flags(struct net_device *dev, int change) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; if (dev->flags & IFF_UP) { if (change & IFF_ALLMULTI) dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); if (change & IFF_PROMISC) dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); } } static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) { dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev); } static __be16 vlan_parse_protocol(const struct sk_buff *skb) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); return __vlan_get_protocol(skb, veth->h_vlan_proto, NULL); } static const struct header_ops vlan_header_ops = { .create = vlan_dev_hard_header, .parse = eth_header_parse, .parse_protocol = vlan_parse_protocol, }; static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned int len) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; if (saddr == NULL) saddr = dev->dev_addr; return dev_hard_header(skb, real_dev, type, daddr, saddr, len); } static const struct header_ops vlan_passthru_header_ops = { .create = vlan_passthru_hard_header, .parse = eth_header_parse, .parse_protocol = vlan_parse_protocol, }; static const struct device_type vlan_type = { .name = "vlan", }; static const struct net_device_ops vlan_netdev_ops; static int vlan_dev_init(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; netif_carrier_off(dev); /* IFF_BROADCAST|IFF_MULTICAST; ??? */ dev->flags = real_dev->flags & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_MASTER | IFF_SLAVE); dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))) | (1<<__LINK_STATE_PRESENT); if (vlan->flags & VLAN_FLAG_BRIDGE_BINDING) dev->state |= (1 << __LINK_STATE_NOCARRIER); dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | NETIF_F_HIGHDMA | NETIF_F_SCTP_CRC | NETIF_F_FCOE_CRC | NETIF_F_FSO; if (real_dev->vlan_features & NETIF_F_HW_MACSEC) dev->hw_features |= NETIF_F_HW_MACSEC; dev->features |= dev->hw_features; dev->lltx = true; dev->fcoe_mtu = true; netif_inherit_tso_max(dev, real_dev); if (dev->features & NETIF_F_VLAN_FEATURES) netdev_warn(real_dev, "VLAN features are set incorrectly. 
Q-in-Q configurations may not work correctly.\n"); dev->vlan_features = real_dev->vlan_features & ~(NETIF_F_FCOE_CRC | NETIF_F_FSO); dev->hw_enc_features = vlan_tnl_features(real_dev); dev->mpls_features = real_dev->mpls_features; /* ipv6 shared card related stuff */ dev->dev_id = real_dev->dev_id; if (is_zero_ether_addr(dev->dev_addr)) { eth_hw_addr_set(dev, real_dev->dev_addr); dev->addr_assign_type = NET_ADDR_STOLEN; } if (is_zero_ether_addr(dev->broadcast)) memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); #if IS_ENABLED(CONFIG_FCOE) dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid; #endif dev->needed_headroom = real_dev->needed_headroom; if (vlan_hw_offload_capable(real_dev->features, vlan->vlan_proto)) { dev->header_ops = &vlan_passthru_header_ops; dev->hard_header_len = real_dev->hard_header_len; } else { dev->header_ops = &vlan_header_ops; dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN; } dev->netdev_ops = &vlan_netdev_ops; SET_NETDEV_DEVTYPE(dev, &vlan_type); netdev_lockdep_set_classes(dev); vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats); if (!vlan->vlan_pcpu_stats) return -ENOMEM; /* Get vlan's reference to real_dev */ netdev_hold(real_dev, &vlan->dev_tracker, GFP_KERNEL); return 0; } /* Note: this function might be called multiple times for the same device. */ void vlan_dev_free_egress_priority(const struct net_device *dev) { struct vlan_priority_tci_mapping *pm; struct vlan_dev_priv *vlan = vlan_dev_priv(dev); int i; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { while ((pm = vlan->egress_priority_map[i]) != NULL) { vlan->egress_priority_map[i] = pm->next; kfree(pm); } } } static void vlan_dev_uninit(struct net_device *dev) { vlan_dev_free_egress_priority(dev); } static netdev_features_t vlan_dev_fix_features(struct net_device *dev, netdev_features_t features) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; netdev_features_t old_features = features; netdev_features_t lower_features; lower_features = netdev_intersect_features((real_dev->vlan_features | NETIF_F_RXCSUM), real_dev->features); /* Add HW_CSUM setting to preserve user ability to control * checksum offload on the vlan device. 
*/ if (lower_features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) lower_features |= NETIF_F_HW_CSUM; features = netdev_intersect_features(features, lower_features); features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_GSO_SOFTWARE); return features; } static int vlan_ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); return __ethtool_get_link_ksettings(vlan->real_dev, cmd); } static void vlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strscpy(info->driver, vlan_fullname, sizeof(info->driver)); strscpy(info->version, vlan_version, sizeof(info->version)); strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); } static int vlan_ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); return ethtool_get_ts_info_by_layer(vlan->real_dev, info); } static void vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct vlan_pcpu_stats *p; u32 rx_errors = 0, tx_dropped = 0; int i; for_each_possible_cpu(i) { u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; unsigned int start; p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); do { start = u64_stats_fetch_begin(&p->syncp); rxpackets = u64_stats_read(&p->rx_packets); rxbytes = u64_stats_read(&p->rx_bytes); rxmulticast = u64_stats_read(&p->rx_multicast); txpackets = u64_stats_read(&p->tx_packets); txbytes = u64_stats_read(&p->tx_bytes); } while (u64_stats_fetch_retry(&p->syncp, start)); stats->rx_packets += rxpackets; stats->rx_bytes += rxbytes; stats->multicast += rxmulticast; stats->tx_packets += txpackets; stats->tx_bytes += txbytes; /* rx_errors & tx_dropped are u32 */ rx_errors += READ_ONCE(p->rx_errors); tx_dropped += READ_ONCE(p->tx_dropped); } stats->rx_errors = rx_errors; stats->tx_dropped = tx_dropped; } #ifdef CONFIG_NET_POLL_CONTROLLER static void vlan_dev_poll_controller(struct net_device *dev) { return; } static int vlan_dev_netpoll_setup(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; struct netpoll *netpoll; int err = 0; netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); err = -ENOMEM; if (!netpoll) goto out; err = __netpoll_setup(netpoll, real_dev); if (err) { kfree(netpoll); goto out; } vlan->netpoll = netpoll; out: return err; } static void vlan_dev_netpoll_cleanup(struct net_device *dev) { struct vlan_dev_priv *vlan= vlan_dev_priv(dev); struct netpoll *netpoll = vlan->netpoll; if (!netpoll) return; vlan->netpoll = NULL; __netpoll_free(netpoll); } #endif /* CONFIG_NET_POLL_CONTROLLER */ static int vlan_dev_get_iflink(const struct net_device *dev) { const struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; return READ_ONCE(real_dev->ifindex); } static int vlan_dev_fill_forward_path(struct net_device_path_ctx *ctx, struct net_device_path *path) { struct vlan_dev_priv *vlan = vlan_dev_priv(ctx->dev); path->type = DEV_PATH_VLAN; path->encap.id = vlan->vlan_id; path->encap.proto = vlan->vlan_proto; path->dev = ctx->dev; ctx->dev = vlan->real_dev; if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan)) return -ENOSPC; ctx->vlan[ctx->num_vlans].id = vlan->vlan_id; ctx->vlan[ctx->num_vlans].proto = vlan->vlan_proto; ctx->num_vlans++; return 0; } #if IS_ENABLED(CONFIG_MACSEC) static const struct macsec_ops *vlan_get_macsec_ops(const struct macsec_context *ctx) { return vlan_dev_priv(ctx->netdev)->real_dev->macsec_ops; } 
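The stats aggregation in vlan_dev_get_stats64() above relies on the u64_stats_fetch_begin()/u64_stats_fetch_retry() pair so that 64-bit counters are never read half-updated (the sequence matters mostly on 32-bit kernels, where a u64 update is not atomic). The snippet below is a minimal, single-threaded userspace sketch of that retry structure only; the names are invented for illustration, and the kernel's u64_stats/seqcount primitives supply the memory-ordering guarantees a real concurrent version would need.

#include <stdint.h>
#include <stdio.h>

/* Illustrative per-CPU-style counter block: seq is bumped to an odd value
 * while an update is in flight and back to even when it completes. */
struct toy_stats {
	unsigned int seq;
	uint64_t tx_packets;
	uint64_t tx_bytes;
};

static void stats_update(struct toy_stats *s, uint64_t bytes)
{
	s->seq++;			/* odd: update in progress */
	s->tx_packets++;
	s->tx_bytes += bytes;
	s->seq++;			/* even: update complete */
}

static void stats_read(const struct toy_stats *s, uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {	/* retry until a stable, even sequence number is observed */
		start = s->seq;
		*packets = s->tx_packets;
		*bytes = s->tx_bytes;
	} while ((start & 1) || start != s->seq);
}

int main(void)
{
	struct toy_stats s = { 0 };
	uint64_t p, b;

	stats_update(&s, 1500);
	stats_read(&s, &p, &b);
	printf("packets=%llu bytes=%llu\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}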
static int vlan_macsec_offload(int (* const func)(struct macsec_context *), struct macsec_context *ctx) { if (unlikely(!func)) return 0; return (*func)(ctx); } static int vlan_macsec_dev_open(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_dev_open, ctx); } static int vlan_macsec_dev_stop(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_dev_stop, ctx); } static int vlan_macsec_add_secy(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_add_secy, ctx); } static int vlan_macsec_upd_secy(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_upd_secy, ctx); } static int vlan_macsec_del_secy(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_del_secy, ctx); } static int vlan_macsec_add_rxsc(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_add_rxsc, ctx); } static int vlan_macsec_upd_rxsc(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_upd_rxsc, ctx); } static int vlan_macsec_del_rxsc(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_del_rxsc, ctx); } static int vlan_macsec_add_rxsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_add_rxsa, ctx); } static int vlan_macsec_upd_rxsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_upd_rxsa, ctx); } static int vlan_macsec_del_rxsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_del_rxsa, ctx); } static int vlan_macsec_add_txsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_add_txsa, ctx); } static int vlan_macsec_upd_txsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_upd_txsa, ctx); } static int vlan_macsec_del_txsa(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_del_txsa, ctx); } static int vlan_macsec_get_dev_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_dev_stats, ctx); } static int vlan_macsec_get_tx_sc_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_tx_sc_stats, ctx); } static int vlan_macsec_get_tx_sa_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = 
vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_tx_sa_stats, ctx); } static int vlan_macsec_get_rx_sc_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_rx_sc_stats, ctx); } static int vlan_macsec_get_rx_sa_stats(struct macsec_context *ctx) { const struct macsec_ops *ops = vlan_get_macsec_ops(ctx); if (!ops) return -EOPNOTSUPP; return vlan_macsec_offload(ops->mdo_get_rx_sa_stats, ctx); } static const struct macsec_ops macsec_offload_ops = { /* Device wide */ .mdo_dev_open = vlan_macsec_dev_open, .mdo_dev_stop = vlan_macsec_dev_stop, /* SecY */ .mdo_add_secy = vlan_macsec_add_secy, .mdo_upd_secy = vlan_macsec_upd_secy, .mdo_del_secy = vlan_macsec_del_secy, /* Security channels */ .mdo_add_rxsc = vlan_macsec_add_rxsc, .mdo_upd_rxsc = vlan_macsec_upd_rxsc, .mdo_del_rxsc = vlan_macsec_del_rxsc, /* Security associations */ .mdo_add_rxsa = vlan_macsec_add_rxsa, .mdo_upd_rxsa = vlan_macsec_upd_rxsa, .mdo_del_rxsa = vlan_macsec_del_rxsa, .mdo_add_txsa = vlan_macsec_add_txsa, .mdo_upd_txsa = vlan_macsec_upd_txsa, .mdo_del_txsa = vlan_macsec_del_txsa, /* Statistics */ .mdo_get_dev_stats = vlan_macsec_get_dev_stats, .mdo_get_tx_sc_stats = vlan_macsec_get_tx_sc_stats, .mdo_get_tx_sa_stats = vlan_macsec_get_tx_sa_stats, .mdo_get_rx_sc_stats = vlan_macsec_get_rx_sc_stats, .mdo_get_rx_sa_stats = vlan_macsec_get_rx_sa_stats, }; #endif static const struct ethtool_ops vlan_ethtool_ops = { .get_link_ksettings = vlan_ethtool_get_link_ksettings, .get_drvinfo = vlan_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, .get_ts_info = vlan_ethtool_get_ts_info, }; static const struct net_device_ops vlan_netdev_ops = { .ndo_change_mtu = vlan_dev_change_mtu, .ndo_init = vlan_dev_init, .ndo_uninit = vlan_dev_uninit, .ndo_open = vlan_dev_open, .ndo_stop = vlan_dev_stop, .ndo_start_xmit = vlan_dev_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = vlan_dev_set_mac_address, .ndo_set_rx_mode = vlan_dev_set_rx_mode, .ndo_change_rx_flags = vlan_dev_change_rx_flags, .ndo_eth_ioctl = vlan_dev_ioctl, .ndo_neigh_setup = vlan_dev_neigh_setup, .ndo_get_stats64 = vlan_dev_get_stats64, #if IS_ENABLED(CONFIG_FCOE) .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup, .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, .ndo_fcoe_enable = vlan_dev_fcoe_enable, .ndo_fcoe_disable = vlan_dev_fcoe_disable, .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, #endif #ifdef NETDEV_FCOE_WWNN .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = vlan_dev_poll_controller, .ndo_netpoll_setup = vlan_dev_netpoll_setup, .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup, #endif .ndo_fix_features = vlan_dev_fix_features, .ndo_get_iflink = vlan_dev_get_iflink, .ndo_fill_forward_path = vlan_dev_fill_forward_path, .ndo_hwtstamp_get = vlan_hwtstamp_get, .ndo_hwtstamp_set = vlan_hwtstamp_set, }; static void vlan_dev_free(struct net_device *dev) { struct vlan_dev_priv *vlan = vlan_dev_priv(dev); free_percpu(vlan->vlan_pcpu_stats); vlan->vlan_pcpu_stats = NULL; /* Get rid of the vlan's reference to real_dev */ netdev_put(vlan->real_dev, &vlan->dev_tracker); } void vlan_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN | IFF_NO_QUEUE; dev->priv_flags |= IFF_UNICAST_FLT; dev->priv_flags &= ~IFF_TX_SKB_SHARING; netif_keep_dst(dev); dev->netdev_ops = &vlan_netdev_ops; dev->needs_free_netdev = true; 
dev->priv_destructor = vlan_dev_free; dev->ethtool_ops = &vlan_ethtool_ops; #if IS_ENABLED(CONFIG_MACSEC) dev->macsec_ops = &macsec_offload_ops; #endif dev->min_mtu = 0; dev->max_mtu = ETH_MAX_MTU; eth_zero_addr(dev->broadcast); }
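vlan_dev_set_egress_priority() above initializes the new vlan_priority_tci_mapping completely and only then publishes it into egress_priority_map[], with smp_wmb() between the two steps so the lock-free reader in vlan_dev_get_egress_qos_mask() never sees a half-built node. A rough userspace analog of that publish/observe pairing, using C11 release/acquire atomics and invented names, might look like this:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct prio_map {
	struct prio_map *next;
	unsigned int skb_prio;
	unsigned int vlan_qos;
};

static _Atomic(struct prio_map *) head;

static int publish_mapping(unsigned int skb_prio, unsigned int vlan_qos)
{
	struct prio_map *np = malloc(sizeof(*np));

	if (!np)
		return -1;
	np->skb_prio = skb_prio;
	np->vlan_qos = vlan_qos;
	np->next = atomic_load_explicit(&head, memory_order_relaxed);
	/* the release store plays the role of smp_wmb() + pointer assignment */
	atomic_store_explicit(&head, np, memory_order_release);
	return 0;
}

static unsigned int lookup_qos(unsigned int skb_prio)
{
	/* the acquire load plays the role of the reader-side smp_rmb() */
	struct prio_map *mp = atomic_load_explicit(&head, memory_order_acquire);

	for (; mp; mp = mp->next)
		if (mp->skb_prio == skb_prio)
			return mp->vlan_qos;
	return 0;
}

int main(void)
{
	publish_mapping(3, 5 << 13);
	printf("qos for prio 3: %#x\n", lookup_qos(3));
	return 0;
}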
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (C) B.A.T.M.A.N. contributors: * * Simon Wunderlich */ #ifndef _NET_BATMAN_ADV_BLA_H_ #define _NET_BATMAN_ADV_BLA_H_ #include "main.h" #include <linux/compiler.h> #include <linux/netdevice.h> #include <linux/netlink.h> #include <linux/skbuff.h> #include <linux/stddef.h> #include <linux/types.h> /** * batadv_bla_is_loopdetect_mac() - check if the mac address is from a loop * detect frame sent by bridge loop avoidance * @mac: mac address to check * * Return: true if it looks like a loop detect frame * (mac starts with BA:BE), false otherwise */ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac) { if (mac[0] == 0xba && mac[1] == 0xbe) return true; return false; } #ifdef CONFIG_BATMAN_ADV_BLA bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, int packet_type); bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid); bool batadv_bla_is_backbone_gw(struct sk_buff *skb, struct batadv_orig_node *orig_node, int hdr_size); int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb); int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb); bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig, unsigned short vid); bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, struct sk_buff *skb); void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, struct batadv_hard_iface *oldif); void batadv_bla_status_update(struct net_device *net_dev); int batadv_bla_init(struct batadv_priv *bat_priv); void batadv_bla_free(struct batadv_priv *bat_priv); #ifdef CONFIG_BATMAN_ADV_DAT bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, unsigned short vid); #endif #define BATADV_BLA_CRC_INIT 0 #else /* ifdef CONFIG_BATMAN_ADV_BLA */ static inline bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, int packet_type) { return false; } static inline bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) { return false; } static inline bool batadv_bla_is_backbone_gw(struct sk_buff *skb, struct batadv_orig_node *orig_node, int hdr_size) { return false; } static inline bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig, unsigned short vid) { return false; } static inline bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, struct sk_buff *skb) { return false; } static inline void batadv_bla_update_orig_address(struct batadv_priv *bat_priv, struct batadv_hard_iface *primary_if, struct batadv_hard_iface *oldif) { } static inline int batadv_bla_init(struct batadv_priv *bat_priv) { return 1; } static inline void batadv_bla_free(struct batadv_priv *bat_priv) { } static inline int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb) { return -EOPNOTSUPP; } static inline int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb) { return -EOPNOTSUPP; } static inline bool
batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, unsigned short vid) { return true; } #endif /* ifdef CONFIG_BATMAN_ADV_BLA */ #endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
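When CONFIG_BATMAN_ADV_BLA is disabled, the header above substitutes static inline stubs with the same prototypes, so callers compile unchanged and the dead branches fold away. The toy header/consumer below shows the same pattern with a made-up FEATURE_FOO option and foo_rx() function; it builds and runs as-is when the option is left undefined.

#include <stdbool.h>
#include <stdio.h>

#ifdef FEATURE_FOO
bool foo_rx(const char *pkt);		/* real implementation lives elsewhere */
#else
static inline bool foo_rx(const char *pkt)
{
	(void)pkt;
	return false;			/* "not claimed", like the batadv_bla_rx() stub */
}
#endif

int main(void)
{
	/* The call site needs no #ifdef either way. */
	printf("claimed: %d\n", foo_rx("frame"));
	return 0;
}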
// SPDX-License-Identifier: GPL-2.0-or-later /* * Stadia controller rumble support. * * Copyright 2023 Google LLC */ #include <linux/hid.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/module.h> #include "hid-ids.h" #define STADIA_FF_REPORT_ID 5 struct stadiaff_device { struct hid_device *hid; struct hid_report *report; spinlock_t lock; bool removed; uint16_t strong_magnitude; uint16_t weak_magnitude; struct work_struct work; }; static void stadiaff_work(struct work_struct *work) { struct stadiaff_device *stadiaff = container_of(work, struct stadiaff_device, work); struct hid_field *rumble_field = stadiaff->report->field[0]; unsigned long flags; spin_lock_irqsave(&stadiaff->lock, flags); rumble_field->value[0] = stadiaff->strong_magnitude; rumble_field->value[1] = stadiaff->weak_magnitude; spin_unlock_irqrestore(&stadiaff->lock, flags); hid_hw_request(stadiaff->hid, stadiaff->report, HID_REQ_SET_REPORT); } static int stadiaff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct stadiaff_device *stadiaff = hid_get_drvdata(hid); unsigned long flags; spin_lock_irqsave(&stadiaff->lock, flags); if (!stadiaff->removed) { stadiaff->strong_magnitude = effect->u.rumble.strong_magnitude; stadiaff->weak_magnitude = effect->u.rumble.weak_magnitude; schedule_work(&stadiaff->work); } spin_unlock_irqrestore(&stadiaff->lock, flags); return 0; } static int stadiaff_init(struct hid_device *hid) { struct stadiaff_device *stadiaff; struct hid_report *report; struct hid_input *hidinput; struct input_dev *dev; int error; if (list_empty(&hid->inputs)) { hid_err(hid, "no inputs found\n"); return -ENODEV; } hidinput = list_entry(hid->inputs.next, struct hid_input, list); dev = hidinput->input; report = hid_validate_values(hid, HID_OUTPUT_REPORT, STADIA_FF_REPORT_ID, 0, 2); if (!report) return -ENODEV; stadiaff = devm_kzalloc(&hid->dev, sizeof(struct stadiaff_device), GFP_KERNEL); if (!stadiaff) return -ENOMEM; hid_set_drvdata(hid, stadiaff); input_set_capability(dev, EV_FF, FF_RUMBLE); error = input_ff_create_memless(dev, NULL, stadiaff_play); if (error) return error; stadiaff->removed = false; stadiaff->hid = hid; stadiaff->report = report; INIT_WORK(&stadiaff->work, stadiaff_work); spin_lock_init(&stadiaff->lock); hid_info(hid, "Force Feedback for Google Stadia controller\n"); return 0; } static int stadia_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); return ret; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF); if (ret) { hid_err(hdev, "hw start failed\n"); return ret; } ret = stadiaff_init(hdev); if (ret) { hid_err(hdev, "force feedback init failed\n"); hid_hw_stop(hdev); return ret; } return 0; } static void stadia_remove(struct hid_device *hid) { struct stadiaff_device *stadiaff = hid_get_drvdata(hid); unsigned long flags; spin_lock_irqsave(&stadiaff->lock, flags); stadiaff->removed
= true; spin_unlock_irqrestore(&stadiaff->lock, flags); cancel_work_sync(&stadiaff->work); hid_hw_stop(hid); } static const struct hid_device_id stadia_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STADIA) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STADIA) }, { } }; MODULE_DEVICE_TABLE(hid, stadia_devices); static struct hid_driver stadia_driver = { .name = "stadia", .id_table = stadia_devices, .probe = stadia_probe, .remove = stadia_remove, }; module_hid_driver(stadia_driver); MODULE_DESCRIPTION("Google Stadia controller rumble support."); MODULE_LICENSE("GPL");
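stadiaff_play() runs in a context where it must not perform blocking device I/O, so it only records the requested magnitudes under the spinlock and schedules stadiaff_work(), which issues the hid_hw_request(); stadia_remove() sets ->removed under the same lock and then cancel_work_sync()s the work so no report is sent after teardown. The following is a small userspace pthread sketch of that "record under a lock, defer the slow I/O, fence off removal" shape, with all names invented; it is an analogy, not the driver's actual mechanics.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool pending, removed;
static unsigned int strong, weak;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!removed) {
		while (!pending && !removed)
			pthread_cond_wait(&cond, &lock);
		if (removed)
			break;
		unsigned int s = strong, w = weak;
		pending = false;
		pthread_mutex_unlock(&lock);
		/* stand-in for hid_hw_request(): do the slow I/O outside the lock */
		printf("send rumble report: strong=%u weak=%u\n", s, w);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void play(unsigned int s, unsigned int w)
{
	pthread_mutex_lock(&lock);
	if (!removed) {		/* mirrors the ->removed check in stadiaff_play() */
		strong = s;
		weak = w;
		pending = true;
		pthread_cond_signal(&cond);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	play(0xffff, 0x8000);
	/* "remove": block new requests, then wait for the worker to finish */
	pthread_mutex_lock(&lock);
	removed = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}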
// SPDX-License-Identifier: GPL-2.0-or-later /* * LAPB release 002 * * This code REQUIRES 2.1.15 or higher/ NET3.038 * * History * LAPB 001 Jonathan Naylor Started Coding * LAPB 002 Jonathan Naylor New timer architecture. * 2000-10-29 Henner Eisen lapb_data_indication() return status. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/inet.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/uaccess.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/init.h> #include <net/lapb.h> static LIST_HEAD(lapb_list); static DEFINE_RWLOCK(lapb_list_lock); /* * Free an allocated lapb control block. */ static void lapb_free_cb(struct lapb_cb *lapb) { kfree(lapb); } static __inline__ void lapb_hold(struct lapb_cb *lapb) { refcount_inc(&lapb->refcnt); } static __inline__ void lapb_put(struct lapb_cb *lapb) { if (refcount_dec_and_test(&lapb->refcnt)) lapb_free_cb(lapb); } /* * Socket removal during an interrupt is now safe.
*/ static void __lapb_remove_cb(struct lapb_cb *lapb) { if (lapb->node.next) { list_del(&lapb->node); lapb_put(lapb); } } /* * Add a socket to the bound sockets list. */ static void __lapb_insert_cb(struct lapb_cb *lapb) { list_add(&lapb->node, &lapb_list); lapb_hold(lapb); } static struct lapb_cb *__lapb_devtostruct(struct net_device *dev) { struct lapb_cb *lapb, *use = NULL; list_for_each_entry(lapb, &lapb_list, node) { if (lapb->dev == dev) { use = lapb; break; } } if (use) lapb_hold(use); return use; } static struct lapb_cb *lapb_devtostruct(struct net_device *dev) { struct lapb_cb *rc; read_lock_bh(&lapb_list_lock); rc = __lapb_devtostruct(dev); read_unlock_bh(&lapb_list_lock); return rc; } /* * Create an empty LAPB control block. */ static struct lapb_cb *lapb_create_cb(void) { struct lapb_cb *lapb = kzalloc(sizeof(*lapb), GFP_ATOMIC); if (!lapb) goto out; skb_queue_head_init(&lapb->write_queue); skb_queue_head_init(&lapb->ack_queue); timer_setup(&lapb->t1timer, NULL, 0); timer_setup(&lapb->t2timer, NULL, 0); lapb->t1timer_running = false; lapb->t2timer_running = false; lapb->t1 = LAPB_DEFAULT_T1; lapb->t2 = LAPB_DEFAULT_T2; lapb->n2 = LAPB_DEFAULT_N2; lapb->mode = LAPB_DEFAULT_MODE; lapb->window = LAPB_DEFAULT_WINDOW; lapb->state = LAPB_STATE_0; spin_lock_init(&lapb->lock); refcount_set(&lapb->refcnt, 1); out: return lapb; } int lapb_register(struct net_device *dev, const struct lapb_register_struct *callbacks) { struct lapb_cb *lapb; int rc = LAPB_BADTOKEN; write_lock_bh(&lapb_list_lock); lapb = __lapb_devtostruct(dev); if (lapb) { lapb_put(lapb); goto out; } lapb = lapb_create_cb(); rc = LAPB_NOMEM; if (!lapb) goto out; lapb->dev = dev; lapb->callbacks = callbacks; __lapb_insert_cb(lapb); lapb_start_t1timer(lapb); rc = LAPB_OK; out: write_unlock_bh(&lapb_list_lock); return rc; } EXPORT_SYMBOL(lapb_register); int lapb_unregister(struct net_device *dev) { struct lapb_cb *lapb; int rc = LAPB_BADTOKEN; write_lock_bh(&lapb_list_lock); lapb = __lapb_devtostruct(dev); if (!lapb) goto out; lapb_put(lapb); /* Wait for other refs to "lapb" to drop */ while (refcount_read(&lapb->refcnt) > 2) usleep_range(1, 10); spin_lock_bh(&lapb->lock); lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); lapb_clear_queues(lapb); spin_unlock_bh(&lapb->lock); /* Wait for running timers to stop */ del_timer_sync(&lapb->t1timer); del_timer_sync(&lapb->t2timer); __lapb_remove_cb(lapb); lapb_put(lapb); rc = LAPB_OK; out: write_unlock_bh(&lapb_list_lock); return rc; } EXPORT_SYMBOL(lapb_unregister); int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) { int rc = LAPB_BADTOKEN; struct lapb_cb *lapb = lapb_devtostruct(dev); if (!lapb) goto out; spin_lock_bh(&lapb->lock); parms->t1 = lapb->t1 / HZ; parms->t2 = lapb->t2 / HZ; parms->n2 = lapb->n2; parms->n2count = lapb->n2count; parms->state = lapb->state; parms->window = lapb->window; parms->mode = lapb->mode; if (!timer_pending(&lapb->t1timer)) parms->t1timer = 0; else parms->t1timer = (lapb->t1timer.expires - jiffies) / HZ; if (!timer_pending(&lapb->t2timer)) parms->t2timer = 0; else parms->t2timer = (lapb->t2timer.expires - jiffies) / HZ; spin_unlock_bh(&lapb->lock); lapb_put(lapb); rc = LAPB_OK; out: return rc; } EXPORT_SYMBOL(lapb_getparms); int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) { int rc = LAPB_BADTOKEN; struct lapb_cb *lapb = lapb_devtostruct(dev); if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_INVALUE; if (parms->t1 < 1 || parms->t2 < 1 || parms->n2 < 1) goto out_put; if (lapb->state == 
LAPB_STATE_0) { if (parms->mode & LAPB_EXTENDED) { if (parms->window < 1 || parms->window > 127) goto out_put; } else { if (parms->window < 1 || parms->window > 7) goto out_put; } lapb->mode = parms->mode; lapb->window = parms->window; } lapb->t1 = parms->t1 * HZ; lapb->t2 = parms->t2 * HZ; lapb->n2 = parms->n2; rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_setparms); int lapb_connect_request(struct net_device *dev) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_OK; if (lapb->state == LAPB_STATE_1) goto out_put; rc = LAPB_CONNECTED; if (lapb->state == LAPB_STATE_3 || lapb->state == LAPB_STATE_4) goto out_put; lapb_establish_data_link(lapb); lapb_dbg(0, "(%p) S0 -> S1\n", lapb->dev); lapb->state = LAPB_STATE_1; rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_connect_request); static int __lapb_disconnect_request(struct lapb_cb *lapb) { switch (lapb->state) { case LAPB_STATE_0: return LAPB_NOTCONNECTED; case LAPB_STATE_1: lapb_dbg(1, "(%p) S1 TX DISC(1)\n", lapb->dev); lapb_dbg(0, "(%p) S1 -> S0\n", lapb->dev); lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); lapb->state = LAPB_STATE_0; lapb_start_t1timer(lapb); return LAPB_NOTCONNECTED; case LAPB_STATE_2: return LAPB_OK; } lapb_clear_queues(lapb); lapb->n2count = 0; lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND); lapb_start_t1timer(lapb); lapb_stop_t2timer(lapb); lapb->state = LAPB_STATE_2; lapb_dbg(1, "(%p) S3 DISC(1)\n", lapb->dev); lapb_dbg(0, "(%p) S3 -> S2\n", lapb->dev); return LAPB_OK; } int lapb_disconnect_request(struct net_device *dev) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = __lapb_disconnect_request(lapb); spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_disconnect_request); int lapb_data_request(struct net_device *dev, struct sk_buff *skb) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (!lapb) goto out; spin_lock_bh(&lapb->lock); rc = LAPB_NOTCONNECTED; if (lapb->state != LAPB_STATE_3 && lapb->state != LAPB_STATE_4) goto out_put; skb_queue_tail(&lapb->write_queue, skb); lapb_kick(lapb); rc = LAPB_OK; out_put: spin_unlock_bh(&lapb->lock); lapb_put(lapb); out: return rc; } EXPORT_SYMBOL(lapb_data_request); int lapb_data_received(struct net_device *dev, struct sk_buff *skb) { struct lapb_cb *lapb = lapb_devtostruct(dev); int rc = LAPB_BADTOKEN; if (lapb) { spin_lock_bh(&lapb->lock); lapb_data_input(lapb, skb); spin_unlock_bh(&lapb->lock); lapb_put(lapb); rc = LAPB_OK; } return rc; } EXPORT_SYMBOL(lapb_data_received); void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->connect_confirmation) lapb->callbacks->connect_confirmation(lapb->dev, reason); } void lapb_connect_indication(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->connect_indication) lapb->callbacks->connect_indication(lapb->dev, reason); } void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->disconnect_confirmation) lapb->callbacks->disconnect_confirmation(lapb->dev, reason); } void lapb_disconnect_indication(struct lapb_cb *lapb, int reason) { if (lapb->callbacks->disconnect_indication) lapb->callbacks->disconnect_indication(lapb->dev, reason); } int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb) { if 
(lapb->callbacks->data_indication) return lapb->callbacks->data_indication(lapb->dev, skb); kfree_skb(skb); return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */ } int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) { int used = 0; if (lapb->callbacks->data_transmit) { lapb->callbacks->data_transmit(lapb->dev, skb); used = 1; } return used; } /* Handle device status changes. */ static int lapb_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct lapb_cb *lapb; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type != ARPHRD_X25) return NOTIFY_DONE; lapb = lapb_devtostruct(dev); if (!lapb) return NOTIFY_DONE; spin_lock_bh(&lapb->lock); switch (event) { case NETDEV_UP: lapb_dbg(0, "(%p) Interface up: %s\n", dev, dev->name); if (netif_carrier_ok(dev)) { lapb_dbg(0, "(%p): Carrier is already up: %s\n", dev, dev->name); if (lapb->mode & LAPB_DCE) { lapb_start_t1timer(lapb); } else { if (lapb->state == LAPB_STATE_0) { lapb->state = LAPB_STATE_1; lapb_establish_data_link(lapb); } } } break; case NETDEV_GOING_DOWN: if (netif_carrier_ok(dev)) __lapb_disconnect_request(lapb); break; case NETDEV_DOWN: lapb_dbg(0, "(%p) Interface down: %s\n", dev, dev->name); lapb_dbg(0, "(%p) S%d -> S0\n", dev, lapb->state); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb->n2count = 0; lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); break; case NETDEV_CHANGE: if (netif_carrier_ok(dev)) { lapb_dbg(0, "(%p): Carrier detected: %s\n", dev, dev->name); if (lapb->mode & LAPB_DCE) { lapb_start_t1timer(lapb); } else { if (lapb->state == LAPB_STATE_0) { lapb->state = LAPB_STATE_1; lapb_establish_data_link(lapb); } } } else { lapb_dbg(0, "(%p) Carrier lost: %s\n", dev, dev->name); lapb_dbg(0, "(%p) S%d -> S0\n", dev, lapb->state); lapb_clear_queues(lapb); lapb->state = LAPB_STATE_0; lapb->n2count = 0; lapb_stop_t1timer(lapb); lapb_stop_t2timer(lapb); } break; } spin_unlock_bh(&lapb->lock); lapb_put(lapb); return NOTIFY_DONE; } static struct notifier_block lapb_dev_notifier = { .notifier_call = lapb_device_event, }; static int __init lapb_init(void) { return register_netdevice_notifier(&lapb_dev_notifier); } static void __exit lapb_exit(void) { WARN_ON(!list_empty(&lapb_list)); unregister_netdevice_notifier(&lapb_dev_notifier); } MODULE_AUTHOR("Jonathan Naylor <g4klx@g4klx.demon.co.uk>"); MODULE_DESCRIPTION("The X.25 Link Access Procedure B link layer protocol"); MODULE_LICENSE("GPL"); module_init(lapb_init); module_exit(lapb_exit);
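A driver attaches to this LAPB core by filling a lapb_register_struct with the callbacks invoked above (connect/disconnect confirmation and indication, data_indication, data_transmit) and calling lapb_register() on its net_device; outgoing frames then go through lapb_data_request() and received frames through lapb_data_received(). The fragment below sketches such a registration. It is illustrative only, not a buildable driver: the mydrv_* handlers are placeholders, and real users (e.g. the X.25 device drivers) do considerably more per callback.

/* Sketch only -- not compiled. */
static void mydrv_connected(struct net_device *dev, int reason)
{
	/* link is up; start accepting payload for this device */
}

static void mydrv_disconnected(struct net_device *dev, int reason)
{
	/* link went down; drop or park queued payload */
}

static int mydrv_data_indication(struct net_device *dev, struct sk_buff *skb)
{
	return netif_rx(skb);	/* deliver the decoded frame to the stack */
}

static void mydrv_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
	/* hand the LAPB-framed skb to the physical interface */
}

static const struct lapb_register_struct mydrv_callbacks = {
	.connect_confirmation    = mydrv_connected,
	.connect_indication      = mydrv_connected,
	.disconnect_confirmation = mydrv_disconnected,
	.disconnect_indication   = mydrv_disconnected,
	.data_indication         = mydrv_data_indication,
	.data_transmit           = mydrv_data_transmit,
};

/* at device setup: */
	int rc = lapb_register(dev, &mydrv_callbacks);	/* LAPB_OK on success */
/* at device teardown: */
	lapb_unregister(dev);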
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_TLBFLUSH_H #define _ASM_X86_TLBFLUSH_H #include <linux/mm_types.h> #include <linux/mmu_notifier.h> #include <linux/sched.h> #include <asm/processor.h> #include <asm/cpufeature.h> #include <asm/special_insns.h> #include <asm/smp.h> #include <asm/invpcid.h> #include <asm/pti.h> #include <asm/processor-flags.h> #include <asm/pgtable.h> DECLARE_PER_CPU(u64, tlbstate_untag_mask); void __flush_tlb_all(void); #define TLB_FLUSH_ALL -1UL #define TLB_GENERATION_INVALID 0 void cr4_update_irqsoff(unsigned long set, unsigned long clear); unsigned long cr4_read_shadow(void); /* Set in this cpu's CR4. */ static inline void cr4_set_bits_irqsoff(unsigned long mask) { cr4_update_irqsoff(mask, 0); } /* Clear in this cpu's CR4. */ static inline void cr4_clear_bits_irqsoff(unsigned long mask) { cr4_update_irqsoff(0, mask); } /* Set in this cpu's CR4. */ static inline void cr4_set_bits(unsigned long mask) { unsigned long flags; local_irq_save(flags); cr4_set_bits_irqsoff(mask); local_irq_restore(flags); } /* Clear in this cpu's CR4. */ static inline void cr4_clear_bits(unsigned long mask) { unsigned long flags; local_irq_save(flags); cr4_clear_bits_irqsoff(mask); local_irq_restore(flags); } #ifndef MODULE /* * 6 because 6 should be plenty and struct tlb_state will fit in two cache * lines. */ #define TLB_NR_DYN_ASIDS 6 struct tlb_context { u64 ctx_id; u64 tlb_gen; }; struct tlb_state { /* * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts * are on. This means that it may not match current->active_mm, * which will contain the previous user mm when we're in lazy TLB * mode even if we've already switched back to swapper_pg_dir. * * During switch_mm_irqs_off(), loaded_mm will be set to * LOADED_MM_SWITCHING during the brief interrupts-off window * when CR3 and loaded_mm would otherwise be inconsistent. This * is for nmi_uaccess_okay()'s benefit.
*/ struct mm_struct *loaded_mm; #define LOADED_MM_SWITCHING ((struct mm_struct *)1UL) /* Last user mm for optimizing IBPB */ union { struct mm_struct *last_user_mm; unsigned long last_user_mm_spec; }; u16 loaded_mm_asid; u16 next_asid; /* * If set we changed the page tables in such a way that we * needed an invalidation of all contexts (aka. PCIDs / ASIDs). * This tells us to go invalidate all the non-loaded ctxs[] * on the next context switch. * * The current ctx was kept up-to-date as it ran and does not * need to be invalidated. */ bool invalidate_other; #ifdef CONFIG_ADDRESS_MASKING /* * Active LAM mode. * * X86_CR3_LAM_U57/U48 shifted right by X86_CR3_LAM_U57_BIT or 0 if LAM * disabled. */ u8 lam; #endif /* * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate * the corresponding user PCID needs a flush next time we * switch to it; see SWITCH_TO_USER_CR3. */ unsigned short user_pcid_flush_mask; /* * Access to this CR4 shadow and to H/W CR4 is protected by * disabling interrupts when modifying either one. */ unsigned long cr4; /* * This is a list of all contexts that might exist in the TLB. * There is one per ASID that we use, and the ASID (what the * CPU calls PCID) is the index into ctxts. * * For each context, ctx_id indicates which mm the TLB's user * entries came from. As an invariant, the TLB will never * contain entries that are out-of-date as when that mm reached * the tlb_gen in the list. * * To be clear, this means that it's legal for the TLB code to * flush the TLB without updating tlb_gen. This can happen * (for now, at least) due to paravirt remote flushes. * * NB: context 0 is a bit special, since it's also used by * various bits of init code. This is fine -- code that * isn't aware of PCID will end up harmlessly flushing * context 0. */ struct tlb_context ctxs[TLB_NR_DYN_ASIDS]; }; DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate); struct tlb_state_shared { /* * We can be in one of several states: * * - Actively using an mm. Our CPU's bit will be set in * mm_cpumask(loaded_mm) and is_lazy == false; * * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit * will not be set in mm_cpumask(&init_mm) and is_lazy == false. * * - Lazily using a real mm. loaded_mm != &init_mm, our bit * is set in mm_cpumask(loaded_mm), but is_lazy == true. * We're heuristically guessing that the CR3 load we * skipped more than makes up for the overhead added by * lazy mode. */ bool is_lazy; }; DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared); bool nmi_uaccess_okay(void); #define nmi_uaccess_okay nmi_uaccess_okay /* Initialize cr4 shadow for this CPU. */ static inline void cr4_init_shadow(void) { this_cpu_write(cpu_tlbstate.cr4, __read_cr4()); } extern unsigned long mmu_cr4_features; extern u32 *trampoline_cr4_features; extern void initialize_tlbstate_and_flush(void); /* * TLB flushing: * * - flush_tlb_all() flushes all processes TLBs * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages * - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus * * ..but the i386 has somewhat limited tlb flushing capabilities, * and page-granular flushes are available only on i486 and up. */ struct flush_tlb_info { /* * We support several kinds of flushes. * * - Fully flush a single mm. 
.mm will be set, .end will be * TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to * which the IPI sender is trying to catch us up. * * - Partially flush a single mm. .mm will be set, .start and * .end will indicate the range, and .new_tlb_gen will be set * such that the changes between generation .new_tlb_gen-1 and * .new_tlb_gen are entirely contained in the indicated range. * * - Fully flush all mms whose tlb_gens have been updated. .mm * will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen * will be zero. */ struct mm_struct *mm; unsigned long start; unsigned long end; u64 new_tlb_gen; unsigned int initiating_cpu; u8 stride_shift; u8 freed_tables; }; void flush_tlb_local(void); void flush_tlb_one_user(unsigned long addr); void flush_tlb_one_kernel(unsigned long addr); void flush_tlb_multi(const struct cpumask *cpumask, const struct flush_tlb_info *info); #ifdef CONFIG_PARAVIRT #include <asm/paravirt.h> #endif #define flush_tlb_mm(mm) \ flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true) #define flush_tlb_range(vma, start, end) \ flush_tlb_mm_range((vma)->vm_mm, start, end, \ ((vma)->vm_flags & VM_HUGETLB) \ ? huge_page_shift(hstate_vma(vma)) \ : PAGE_SHIFT, false) extern void flush_tlb_all(void); extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned int stride_shift, bool freed_tables); extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a) { flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false); } static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm) { bool should_defer = false; /* If remote CPUs need to be flushed then defer batch the flush */ if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids) should_defer = true; put_cpu(); return should_defer; } static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) { /* * Bump the generation count. This also serves as a full barrier * that synchronizes with switch_mm(): callers are required to order * their read of mm_cpumask after their writes to the paging * structures. */ return atomic64_inc_return(&mm->context.tlb_gen); } static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, struct mm_struct *mm, unsigned long uaddr) { inc_mm_tlb_gen(mm); cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm)); mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL); } static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm) { flush_tlb_mm(mm); } extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); static inline bool pte_flags_need_flush(unsigned long oldflags, unsigned long newflags, bool ignore_access) { /* * Flags that require a flush when cleared but not when they are set. * Only include flags that would not trigger spurious page-faults. * Non-present entries are not cached. Hardware would set the * dirty/access bit if needed without a fault. 
*/ const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED; const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3 | _PAGE_SOFTW4 | _PAGE_SAVED_DIRTY; const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT | _PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 | _PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX; unsigned long diff = oldflags ^ newflags; BUILD_BUG_ON(flush_on_clear & software_flags); BUILD_BUG_ON(flush_on_clear & flush_on_change); BUILD_BUG_ON(flush_on_change & software_flags); /* Ignore software flags */ diff &= ~software_flags; if (ignore_access) diff &= ~_PAGE_ACCESSED; /* * Did any of the 'flush_on_clear' flags was clleared set from between * 'oldflags' and 'newflags'? */ if (diff & oldflags & flush_on_clear) return true; /* Flush on modified flags. */ if (diff & flush_on_change) return true; /* Ensure there are no flags that were left behind */ if (IS_ENABLED(CONFIG_DEBUG_VM) && (diff & ~(flush_on_clear | software_flags | flush_on_change))) { VM_WARN_ON_ONCE(1); return true; } return false; } /* * pte_needs_flush() checks whether permissions were demoted and require a * flush. It should only be used for userspace PTEs. */ static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte) { /* !PRESENT -> * ; no need for flush */ if (!(pte_flags(oldpte) & _PAGE_PRESENT)) return false; /* PFN changed ; needs flush */ if (pte_pfn(oldpte) != pte_pfn(newpte)) return true; /* * check PTE flags; ignore access-bit; see comment in * ptep_clear_flush_young(). */ return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte), true); } #define pte_needs_flush pte_needs_flush /* * huge_pmd_needs_flush() checks whether permissions were demoted and require a * flush. It should only be used for userspace huge PMDs. */ static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd) { /* !PRESENT -> * ; no need for flush */ if (!(pmd_flags(oldpmd) & _PAGE_PRESENT)) return false; /* PFN changed ; needs flush */ if (pmd_pfn(oldpmd) != pmd_pfn(newpmd)) return true; /* * check PMD flags; do not ignore access-bit; see * pmdp_clear_flush_young(). */ return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd), false); } #define huge_pmd_needs_flush huge_pmd_needs_flush #ifdef CONFIG_ADDRESS_MASKING static inline u64 tlbstate_lam_cr3_mask(void) { u64 lam = this_cpu_read(cpu_tlbstate.lam); return lam << X86_CR3_LAM_U57_BIT; } static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask) { this_cpu_write(cpu_tlbstate.lam, lam >> X86_CR3_LAM_U57_BIT); this_cpu_write(tlbstate_untag_mask, untag_mask); } #else static inline u64 tlbstate_lam_cr3_mask(void) { return 0; } static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask) { } #endif #endif /* !MODULE */ static inline void __native_tlb_flush_global(unsigned long cr4) { native_write_cr4(cr4 ^ X86_CR4_PGE); native_write_cr4(cr4); } #endif /* _ASM_X86_TLBFLUSH_H */
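pte_flags_need_flush() above decides whether a PTE permission change needs a TLB flush: software-only bits are masked out of the diff, a bit in flush_on_clear forces a flush only when it goes from set to clear, and any change to a flush_on_change bit forces a flush. The toy program below reproduces just that decision structure with made-up flag values so the two interesting cases can be run by hand; it is not the kernel code and the constants are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define F_PRESENT	0x01u
#define F_RW		0x02u
#define F_ACCESSED	0x04u
#define F_DIRTY		0x08u
#define F_SOFTW1	0x10u

static bool flags_need_flush(unsigned long oldflags, unsigned long newflags,
			     bool ignore_access)
{
	const unsigned long flush_on_clear  = F_DIRTY | F_PRESENT | F_ACCESSED;
	const unsigned long software_flags  = F_SOFTW1;
	const unsigned long flush_on_change = F_RW;
	unsigned long diff = oldflags ^ newflags;

	diff &= ~software_flags;		/* software bits never force a flush */
	if (ignore_access)
		diff &= ~F_ACCESSED;
	if (diff & oldflags & flush_on_clear)	/* a flush-on-clear bit went away */
		return true;
	if (diff & flush_on_change)		/* e.g. write permission toggled */
		return true;
	return false;
}

int main(void)
{
	/* Write-protecting a present, dirty page: RW changes -> flush (prints 1). */
	printf("%d\n", flags_need_flush(F_PRESENT | F_RW | F_DIRTY,
					F_PRESENT | F_DIRTY, true));
	/* Only the accessed bit differs and it is ignored -> no flush (prints 0). */
	printf("%d\n", flags_need_flush(F_PRESENT | F_ACCESSED, F_PRESENT, true));
	return 0;
}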
// SPDX-License-Identifier: GPL-2.0-only #include <linux/slab.h> #include <linux/stat.h> #include <linux/sched/xacct.h> #include <linux/fcntl.h> #include <linux/file.h> #include <linux/uio.h> #include <linux/fsnotify.h> #include <linux/security.h> #include <linux/export.h> #include <linux/syscalls.h> #include <linux/pagemap.h> #include <linux/splice.h> #include <linux/compat.h> #include <linux/mount.h> #include <linux/fs.h> #include <linux/dax.h> #include <linux/overflow.h> #include "internal.h" #include <linux/uaccess.h> #include <asm/unistd.h> /* * Performs necessary checks before doing a clone. * * Can adjust amount of bytes to clone via @req_count argument. * Returns appropriate error code that caller should return or * zero in case the clone should be allowed. */ static int generic_remap_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *req_count, unsigned int remap_flags) { struct inode *inode_in = file_in->f_mapping->host; struct inode *inode_out = file_out->f_mapping->host; uint64_t count = *req_count; uint64_t bcount; loff_t size_in, size_out; loff_t bs = inode_out->i_sb->s_blocksize; int ret; /* The start of both ranges must be aligned to an fs block. */ if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs)) return -EINVAL; /* Ensure offsets don't wrap.
*/ if (pos_in + count < pos_in || pos_out + count < pos_out) return -EINVAL; size_in = i_size_read(inode_in); size_out = i_size_read(inode_out); /* Dedupe requires both ranges to be within EOF. */ if ((remap_flags & REMAP_FILE_DEDUP) && (pos_in >= size_in || pos_in + count > size_in || pos_out >= size_out || pos_out + count > size_out)) return -EINVAL; /* Ensure the infile range is within the infile. */ if (pos_in >= size_in) return -EINVAL; count = min(count, size_in - (uint64_t)pos_in); ret = generic_write_check_limits(file_out, pos_out, &count); if (ret) return ret; /* * If the user wanted us to link to the infile's EOF, round up to the * next block boundary for this check. * * Otherwise, make sure the count is also block-aligned, having * already confirmed the starting offsets' block alignment. */ if (pos_in + count == size_in && (!(remap_flags & REMAP_FILE_DEDUP) || pos_out + count == size_out)) { bcount = ALIGN(size_in, bs) - pos_in; } else { if (!IS_ALIGNED(count, bs)) count = ALIGN_DOWN(count, bs); bcount = count; } /* Don't allow overlapped cloning within the same file. */ if (inode_in == inode_out && pos_out + bcount > pos_in && pos_out < pos_in + bcount) return -EINVAL; /* * We shortened the request but the caller can't deal with that, so * bounce the request back to userspace. */ if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN)) return -EINVAL; *req_count = count; return 0; } int remap_verify_area(struct file *file, loff_t pos, loff_t len, bool write) { int mask = write ? MAY_WRITE : MAY_READ; loff_t tmp; int ret; if (unlikely(pos < 0 || len < 0)) return -EINVAL; if (unlikely(check_add_overflow(pos, len, &tmp))) return -EINVAL; ret = security_file_permission(file, mask); if (ret) return ret; return fsnotify_file_area_perm(file, mask, &pos, len); } EXPORT_SYMBOL_GPL(remap_verify_area); /* * Ensure that we don't remap a partial EOF block in the middle of something * else. Assume that the offsets have already been checked for block * alignment. * * For clone we only link a partial EOF block above or at the destination file's * EOF. For deduplication we accept a partial EOF block only if it ends at the * destination file's EOF (can not link it into the middle of a file). * * Shorten the request if possible. */ static int generic_remap_check_len(struct inode *inode_in, struct inode *inode_out, loff_t pos_out, loff_t *len, unsigned int remap_flags) { u64 blkmask = i_blocksize(inode_in) - 1; loff_t new_len = *len; if ((*len & blkmask) == 0) return 0; if (pos_out + *len < i_size_read(inode_out)) new_len &= ~blkmask; if (new_len == *len) return 0; if (remap_flags & REMAP_FILE_CAN_SHORTEN) { *len = new_len; return 0; } return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL; } /* Read a page's worth of file data into the page cache. */ static struct folio *vfs_dedupe_get_folio(struct file *file, loff_t pos) { return read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file); } /* * Lock two folios, ensuring that we lock in offset order if the folios * are from the same file. */ static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2) { /* Always lock in order of increasing index. */ if (folio1->index > folio2->index) swap(folio1, folio2); folio_lock(folio1); if (folio1 != folio2) folio_lock(folio2); } /* Unlock two folios, being careful not to unlock the same folio twice. 
*/ static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2) { folio_unlock(folio1); if (folio1 != folio2) folio_unlock(folio2); } /* * Compare extents of two files to see if they are the same. * Caller must have locked both inodes to prevent write races. */ static int vfs_dedupe_file_range_compare(struct file *src, loff_t srcoff, struct file *dest, loff_t dstoff, loff_t len, bool *is_same) { bool same = true; int error = -EINVAL; while (len) { struct folio *src_folio, *dst_folio; void *src_addr, *dst_addr; loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff), PAGE_SIZE - offset_in_page(dstoff)); cmp_len = min(cmp_len, len); if (cmp_len <= 0) goto out_error; src_folio = vfs_dedupe_get_folio(src, srcoff); if (IS_ERR(src_folio)) { error = PTR_ERR(src_folio); goto out_error; } dst_folio = vfs_dedupe_get_folio(dest, dstoff); if (IS_ERR(dst_folio)) { error = PTR_ERR(dst_folio); folio_put(src_folio); goto out_error; } vfs_lock_two_folios(src_folio, dst_folio); /* * Now that we've locked both folios, make sure they're still * mapped to the file data we're interested in. If not, * someone is invalidating pages on us and we lose. */ if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) || src_folio->mapping != src->f_mapping || dst_folio->mapping != dest->f_mapping) { same = false; goto unlock; } src_addr = kmap_local_folio(src_folio, offset_in_folio(src_folio, srcoff)); dst_addr = kmap_local_folio(dst_folio, offset_in_folio(dst_folio, dstoff)); flush_dcache_folio(src_folio); flush_dcache_folio(dst_folio); if (memcmp(src_addr, dst_addr, cmp_len)) same = false; kunmap_local(dst_addr); kunmap_local(src_addr); unlock: vfs_unlock_two_folios(src_folio, dst_folio); folio_put(dst_folio); folio_put(src_folio); if (!same) break; srcoff += cmp_len; dstoff += cmp_len; len -= cmp_len; } *is_same = same; return 0; out_error: return error; } /* * Check that the two inodes are eligible for cloning, the ranges make * sense, and then flush all dirty data. Caller must ensure that the * inodes have been locked against any other modifications. * * If there's an error, then the usual negative error code is returned. * Otherwise returns 0 with *len set to the request length. */ int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *len, unsigned int remap_flags, const struct iomap_ops *dax_read_ops) { struct inode *inode_in = file_inode(file_in); struct inode *inode_out = file_inode(file_out); bool same_inode = (inode_in == inode_out); int ret; /* Don't touch certain kinds of inodes */ if (IS_IMMUTABLE(inode_out)) return -EPERM; if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out)) return -ETXTBSY; /* Don't reflink dirs, pipes, sockets... */ if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) return -EISDIR; if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) return -EINVAL; /* Zero length dedupe exits immediately; reflink goes to EOF. */ if (*len == 0) { loff_t isize = i_size_read(inode_in); if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize) return 0; if (pos_in > isize) return -EINVAL; *len = isize - pos_in; if (*len == 0) return 0; } /* Check that we don't violate system file offset limits. 
*/ ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len, remap_flags); if (ret || *len == 0) return ret; /* Wait for the completion of any pending IOs on both files */ inode_dio_wait(inode_in); if (!same_inode) inode_dio_wait(inode_out); ret = filemap_write_and_wait_range(inode_in->i_mapping, pos_in, pos_in + *len - 1); if (ret) return ret; ret = filemap_write_and_wait_range(inode_out->i_mapping, pos_out, pos_out + *len - 1); if (ret) return ret; /* * Check that the extents are the same. */ if (remap_flags & REMAP_FILE_DEDUP) { bool is_same = false; if (!IS_DAX(inode_in)) ret = vfs_dedupe_file_range_compare(file_in, pos_in, file_out, pos_out, *len, &is_same); else if (dax_read_ops) ret = dax_dedupe_file_range_compare(inode_in, pos_in, inode_out, pos_out, *len, &is_same, dax_read_ops); else return -EINVAL; if (ret) return ret; if (!is_same) return -EBADE; } ret = generic_remap_check_len(inode_in, inode_out, pos_out, len, remap_flags); if (ret || *len == 0) return ret; /* If we can't alter the file contents, we're done. */ if (!(remap_flags & REMAP_FILE_DEDUP)) ret = file_modified(file_out); return ret; } int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *len, unsigned int remap_flags) { return __generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, len, remap_flags, NULL); } EXPORT_SYMBOL(generic_remap_file_range_prep); loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t len, unsigned int remap_flags) { loff_t ret; WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP); if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) return -EXDEV; ret = generic_file_rw_checks(file_in, file_out); if (ret < 0) return ret; if (!file_in->f_op->remap_file_range) return -EOPNOTSUPP; ret = remap_verify_area(file_in, pos_in, len, false); if (ret) return ret; ret = remap_verify_area(file_out, pos_out, len, true); if (ret) return ret; file_start_write(file_out); ret = file_in->f_op->remap_file_range(file_in, pos_in, file_out, pos_out, len, remap_flags); file_end_write(file_out); if (ret < 0) return ret; fsnotify_access(file_in); fsnotify_modify(file_out); return ret; } EXPORT_SYMBOL(vfs_clone_file_range); /* Check whether we are allowed to dedupe the destination file */ static bool may_dedupe_file(struct file *file) { struct mnt_idmap *idmap = file_mnt_idmap(file); struct inode *inode = file_inode(file); if (capable(CAP_SYS_ADMIN)) return true; if (file->f_mode & FMODE_WRITE) return true; if (vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid())) return true; if (!inode_permission(idmap, inode, MAY_WRITE)) return true; return false; } loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos, struct file *dst_file, loff_t dst_pos, loff_t len, unsigned int remap_flags) { loff_t ret; WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_CAN_SHORTEN)); /* * This is redundant if called from vfs_dedupe_file_range(), but other * callers need it and it's not performance sensitive... */ ret = remap_verify_area(src_file, src_pos, len, false); if (ret) return ret; ret = remap_verify_area(dst_file, dst_pos, len, true); if (ret) return ret; /* * This needs to be called after remap_verify_area() because of * sb_start_write() and before may_dedupe_file() because the mount's * MAY_WRITE needs to be checked with mnt_get_write_access_file() held.
*/ ret = mnt_want_write_file(dst_file); if (ret) return ret; ret = -EPERM; if (!may_dedupe_file(dst_file)) goto out_drop_write; ret = -EXDEV; if (file_inode(src_file)->i_sb != file_inode(dst_file)->i_sb) goto out_drop_write; ret = -EISDIR; if (S_ISDIR(file_inode(dst_file)->i_mode)) goto out_drop_write; ret = -EINVAL; if (!dst_file->f_op->remap_file_range) goto out_drop_write; if (len == 0) { ret = 0; goto out_drop_write; } ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file, dst_pos, len, remap_flags | REMAP_FILE_DEDUP); out_drop_write: mnt_drop_write_file(dst_file); return ret; } EXPORT_SYMBOL(vfs_dedupe_file_range_one); int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same) { struct file_dedupe_range_info *info; struct inode *src = file_inode(file); u64 off; u64 len; int i; int ret; u16 count = same->dest_count; loff_t deduped; if (!(file->f_mode & FMODE_READ)) return -EINVAL; if (same->reserved1 || same->reserved2) return -EINVAL; off = same->src_offset; len = same->src_length; if (S_ISDIR(src->i_mode)) return -EISDIR; if (!S_ISREG(src->i_mode)) return -EINVAL; if (!file->f_op->remap_file_range) return -EOPNOTSUPP; ret = remap_verify_area(file, off, len, false); if (ret < 0) return ret; ret = 0; if (off + len > i_size_read(src)) return -EINVAL; /* Arbitrary 1G limit on a single dedupe request, can be raised. */ len = min_t(u64, len, 1 << 30); /* pre-format output fields to sane values */ for (i = 0; i < count; i++) { same->info[i].bytes_deduped = 0ULL; same->info[i].status = FILE_DEDUPE_RANGE_SAME; } for (i = 0, info = same->info; i < count; i++, info++) { CLASS(fd, dst_fd)(info->dest_fd); if (fd_empty(dst_fd)) { info->status = -EBADF; goto next_loop; } if (info->reserved) { info->status = -EINVAL; goto next_loop; } deduped = vfs_dedupe_file_range_one(file, off, fd_file(dst_fd), info->dest_offset, len, REMAP_FILE_CAN_SHORTEN); if (deduped == -EBADE) info->status = FILE_DEDUPE_RANGE_DIFFERS; else if (deduped < 0) info->status = deduped; else info->bytes_deduped = len; next_loop: if (fatal_signal_pending(current)) break; } return ret; } EXPORT_SYMBOL(vfs_dedupe_file_range);
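/*
 * Editor's note -- illustrative sketch, not part of the kernel file above: the
 * dedupe path ending at vfs_dedupe_file_range() is driven from userspace via
 * the FIDEDUPERANGE ioctl and struct file_dedupe_range from <linux/fs.h>. A
 * minimal caller could look like this (file names "a.dat"/"b.dat" are
 * placeholders); each info slot comes back with FILE_DEDUPE_RANGE_SAME and a
 * bytes_deduped count, FILE_DEDUPE_RANGE_DIFFERS (the kernel's -EBADE case),
 * or a negative errno in status.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
	struct file_dedupe_range *range;
	int src, dst;

	src = open("a.dat", O_RDONLY);	/* placeholder source file */
	dst = open("b.dat", O_RDWR);	/* placeholder destination file */
	if (src < 0 || dst < 0)
		return 1;

	/* One destination range: try to dedupe 1 MiB starting at offset 0. */
	range = calloc(1, sizeof(*range) + sizeof(range->info[0]));
	if (!range)
		return 1;
	range->src_offset = 0;
	range->src_length = 1024 * 1024;
	range->dest_count = 1;
	range->info[0].dest_fd = dst;
	range->info[0].dest_offset = 0;

	if (ioctl(src, FIDEDUPERANGE, range) < 0) {
		perror("FIDEDUPERANGE");
		return 1;
	}
	if (range->info[0].status == FILE_DEDUPE_RANGE_SAME)
		printf("deduped %llu bytes\n",
		       (unsigned long long)range->info[0].bytes_deduped);
	else
		printf("status %d\n", range->info[0].status);
	return 0;
}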
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <drm/drm_edid.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" #include "drm_displayid_internal.h" static const struct displayid_header * displayid_get_header(const u8 *displayid, int length, int index) { const struct displayid_header *base; if (sizeof(*base) > length - index) return ERR_PTR(-EINVAL); base = (const struct displayid_header *)&displayid[index]; return base; } static const struct displayid_header * validate_displayid(const u8 *displayid, int length, int idx) { int i, dispid_length; u8 csum = 0; const struct displayid_header *base; base = displayid_get_header(displayid, length, idx); if (IS_ERR(base)) return base; /* +1 for DispID checksum */ dispid_length = sizeof(*base) + base->bytes + 1; if (dispid_length > length - idx) return ERR_PTR(-EINVAL); for (i = 0; i < dispid_length; i++) csum += displayid[idx + i]; if (csum) { DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum); return ERR_PTR(-EINVAL); } return base; } static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid, int *length, int *idx, int *ext_index) { const struct displayid_header *base; const u8 *displayid; displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index); if (!displayid) return NULL; /* EDID extensions block checksum isn't for us */ *length = EDID_LENGTH - 1; *idx = 1; base = validate_displayid(displayid, *length, *idx); if (IS_ERR(base)) return NULL; *length = *idx + sizeof(*base) + base->bytes; return displayid; } void displayid_iter_edid_begin(const struct drm_edid *drm_edid, struct displayid_iter *iter) { memset(iter, 0, sizeof(*iter)); iter->drm_edid = drm_edid; } static const struct displayid_block * displayid_iter_block(const struct displayid_iter *iter) { const struct displayid_block *block; if (!iter->section) return NULL; block = (const struct displayid_block *)&iter->section[iter->idx]; if (iter->idx + sizeof(*block) <= iter->length && iter->idx + sizeof(*block) + block->num_bytes <= iter->length) return block; return NULL; } const struct displayid_block * __displayid_iter_next(struct displayid_iter *iter) { const struct displayid_block *block; if (!iter->drm_edid) return NULL; if (iter->section) { /* current block should always be valid */ block = displayid_iter_block(iter); if (WARN_ON(!block)) { iter->section = NULL; iter->drm_edid = NULL; return NULL; } /* next block in section */ iter->idx += sizeof(*block) + block->num_bytes; block = displayid_iter_block(iter); if (block) return block; } for (;;) { /* The first section we encounter is the base section */ bool base_section = !iter->section; iter->section = drm_find_displayid_extension(iter->drm_edid, &iter->length, &iter->idx, &iter->ext_index); if (!iter->section) { iter->drm_edid = NULL; return NULL; } /* Save the structure version and primary use case.
*/ if (base_section) { const struct displayid_header *base; base = displayid_get_header(iter->section, iter->length, iter->idx); if (!IS_ERR(base)) { iter->version = base->rev; iter->primary_use = base->prod_id; } } iter->idx += sizeof(struct displayid_header); block = displayid_iter_block(iter); if (block) return block; } } void displayid_iter_end(struct displayid_iter *iter) { memset(iter, 0, sizeof(*iter)); } /* DisplayID Structure Version/Revision from the Base Section. */ u8 displayid_version(const struct displayid_iter *iter) { return iter->version; } /* * DisplayID Primary Use Case (2.0+) or Product Type Identifier (1.0-1.3) from * the Base Section. */ u8 displayid_primary_use(const struct displayid_iter *iter) { return iter->primary_use; }
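/*
 * Editor's note -- illustrative sketch, not part of drm_displayid.c: a typical
 * consumer walks the DisplayID blocks with the iterator defined above, using
 * the begin/next/end pattern. The helper below is hypothetical and only shows
 * the shape of such a walk, counting the blocks that carry a given tag.
 */
static unsigned int example_count_displayid_blocks(const struct drm_edid *drm_edid,
						   u8 tag)
{
	struct displayid_iter iter;
	const struct displayid_block *block;
	unsigned int count = 0;

	displayid_iter_edid_begin(drm_edid, &iter);
	while ((block = __displayid_iter_next(&iter)) != NULL) {
		if (block->tag == tag)
			count++;
	}
	displayid_iter_end(&iter);

	return count;
}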
// SPDX-License-Identifier: GPL-2.0-or-later /* * Video for Linux Two * * A generic video device interface for the LINUX operating system * using a set of device structures/vectors for low level operations. * * This file replaces the videodev.c file that comes with the * regular kernel distribution. * * Author: Bill Dirks <bill@thedirks.org> * based on code by Alan Cox, <alan@cymru.net> */ /* * Video capture interface for Linux * * A generic video device interface for the LINUX operating system * using a set of device structures/vectors for low level operations. * * Author: Alan Cox, <alan@lxorguk.ukuu.org.uk> * * Fixes: */ /* * Video4linux 1/2 integration by Justin Schoeman * <justin@suntiger.ee.up.ac.za> * 2.4 PROCFS support ported from 2.4 kernels by * Iñaki García Etxebarria <garetxe@euskalnet.net> * Makefile fix by "W.
Michael Petullo" <mike@flyn.org> * 2.4 devfs support ported from 2.4 kernels by * Dan Merillat <dan@merillat.org> * Added Gerd Knorrs v4l1 enhancements (Justin Schoeman) */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/uaccess.h> #include <asm/io.h> #include <asm/div64.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-ctrls.h> #include <linux/videodev2.h> /* * * V 4 L 2 D R I V E R H E L P E R A P I * */ /* * Video Standard Operations (contributed by Michael Schimek) */ /* Helper functions for control handling */ /* Fill in a struct v4l2_queryctrl */ int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 _min, s32 _max, s32 _step, s32 _def) { const char *name; s64 min = _min; s64 max = _max; u64 step = _step; s64 def = _def; v4l2_ctrl_fill(qctrl->id, &name, &qctrl->type, &min, &max, &step, &def, &qctrl->flags); if (name == NULL) return -EINVAL; qctrl->minimum = min; qctrl->maximum = max; qctrl->step = step; qctrl->default_value = def; qctrl->reserved[0] = qctrl->reserved[1] = 0; strscpy(qctrl->name, name, sizeof(qctrl->name)); return 0; } EXPORT_SYMBOL(v4l2_ctrl_query_fill); /* Clamp x to be between min and max, aligned to a multiple of 2^align. min * and max don't have to be aligned, but there must be at least one valid * value. E.g., min=17,max=31,align=4 is not allowed as there are no multiples * of 16 between 17 and 31. */ static unsigned int clamp_align(unsigned int x, unsigned int min, unsigned int max, unsigned int align) { /* Bits that must be zero to be aligned */ unsigned int mask = ~((1 << align) - 1); /* Clamp to aligned min and max */ x = clamp(x, (min + ~mask) & mask, max & mask); /* Round to nearest aligned value */ if (align) x = (x + (1 << (align - 1))) & mask; return x; } static unsigned int clamp_roundup(unsigned int x, unsigned int min, unsigned int max, unsigned int alignment) { x = clamp(x, min, max); if (alignment) x = round_up(x, alignment); return x; } void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax, unsigned int walign, u32 *h, unsigned int hmin, unsigned int hmax, unsigned int halign, unsigned int salign) { *w = clamp_align(*w, wmin, wmax, walign); *h = clamp_align(*h, hmin, hmax, halign); /* Usually we don't need to align the size and are done now. */ if (!salign) return; /* How much alignment do we have? */ walign = __ffs(*w); halign = __ffs(*h); /* Enough to satisfy the image alignment? 
*/ if (walign + halign < salign) { /* Max walign where there is still a valid width */ unsigned int wmaxa = __fls(wmax ^ (wmin - 1)); /* Max halign where there is still a valid height */ unsigned int hmaxa = __fls(hmax ^ (hmin - 1)); /* up the smaller alignment until we have enough */ do { if (halign >= hmaxa || (walign <= halign && walign < wmaxa)) { *w = clamp_align(*w, wmin, wmax, walign + 1); walign = __ffs(*w); } else { *h = clamp_align(*h, hmin, hmax, halign + 1); halign = __ffs(*h); } } while (halign + walign < salign); } } EXPORT_SYMBOL_GPL(v4l_bound_align_image); const void * __v4l2_find_nearest_size(const void *array, size_t array_size, size_t entry_size, size_t width_offset, size_t height_offset, s32 width, s32 height) { u32 error, min_error = U32_MAX; const void *best = NULL; unsigned int i; if (!array) return NULL; for (i = 0; i < array_size; i++, array += entry_size) { const u32 *entry_width = array + width_offset; const u32 *entry_height = array + height_offset; error = abs(*entry_width - width) + abs(*entry_height - height); if (error > min_error) continue; min_error = error; best = array; if (!error) break; } return best; } EXPORT_SYMBOL_GPL(__v4l2_find_nearest_size); int v4l2_g_parm_cap(struct video_device *vdev, struct v4l2_subdev *sd, struct v4l2_streamparm *a) { struct v4l2_subdev_frame_interval ival = { 0 }; int ret; if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; if (vdev->device_caps & V4L2_CAP_READWRITE) a->parm.capture.readbuffers = 2; if (v4l2_subdev_has_op(sd, pad, get_frame_interval)) a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; ret = v4l2_subdev_call_state_active(sd, pad, get_frame_interval, &ival); if (!ret) a->parm.capture.timeperframe = ival.interval; return ret; } EXPORT_SYMBOL_GPL(v4l2_g_parm_cap); int v4l2_s_parm_cap(struct video_device *vdev, struct v4l2_subdev *sd, struct v4l2_streamparm *a) { struct v4l2_subdev_frame_interval ival = { .interval = a->parm.capture.timeperframe }; int ret; if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE && a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; memset(&a->parm, 0, sizeof(a->parm)); if (vdev->device_caps & V4L2_CAP_READWRITE) a->parm.capture.readbuffers = 2; else a->parm.capture.readbuffers = 0; if (v4l2_subdev_has_op(sd, pad, get_frame_interval)) a->parm.capture.capability = V4L2_CAP_TIMEPERFRAME; ret = v4l2_subdev_call_state_active(sd, pad, set_frame_interval, &ival); if (!ret) a->parm.capture.timeperframe = ival.interval; return ret; } EXPORT_SYMBOL_GPL(v4l2_s_parm_cap); const struct v4l2_format_info *v4l2_format_info(u32 format) { static const struct v4l2_format_info formats[] = { /* RGB formats */ { .format = V4L2_PIX_FMT_BGR24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGB24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_HSV24, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 3, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_XBGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, 
.hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGRX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_XRGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGBX32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_HSV32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_ARGB32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGBA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_ABGR32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGRA32, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGR48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGB48, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGBA1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGBX1010102, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_ARGB2101010, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, /* YUV packed formats */ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, 
.vdiv = 1 }, { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_Y210, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_Y216, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_MT2110T, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, { .format = V4L2_PIX_FMT_MT2110R, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, .block_w = { 16, 8, 0, 0 }, .block_h = { 32, 16, 0, 0 }}, /* YUV planar formats */ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_NV21, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_NV16, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_NV61, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 }, { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 4 }, { .format = V4L2_PIX_FMT_YUV411P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, 
.bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 4, .vdiv = 1 }, { .format = V4L2_PIX_FMT_YUV420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_YVU420, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, /* Tiled YUV formats */ { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_NV15_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 5, 10, 0, 0 }, .bpp_div = { 4, 4, 1, 1 }, .hdiv = 2, .vdiv = 2, .block_w = { 4, 2, 0, 0 }, .block_h = { 1, 1, 0, 0 }}, { .format = V4L2_PIX_FMT_P010_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, /* YUV planar formats, non contiguous variant */ { .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_YUV422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_YVU422M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_YUV444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_YVU444M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_NV12M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 2, .vdiv = 2 }, /* Bayer RGB formats */ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 
}, { .format = V4L2_PIX_FMT_SGBRG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGRBG8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SRGGB8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SBGGR10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGBRG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGRBG10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SRGGB10, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SBGGR10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGBRG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGRBG10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SRGGB10ALAW8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SBGGR10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGBRG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGRBG10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SRGGB10DPCM8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SBGGR12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGBRG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SGRBG12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_SRGGB12, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .bpp_div = { 1, 1, 1, 1 }, .hdiv = 1, .vdiv = 1 }, }; unsigned int i; for (i = 0; i < ARRAY_SIZE(formats); ++i) if (formats[i].format == format) return &formats[i]; return NULL; } 
EXPORT_SYMBOL(v4l2_format_info); static inline unsigned int v4l2_format_block_width(const struct v4l2_format_info *info, int plane) { if (!info->block_w[plane]) return 1; return info->block_w[plane]; } static inline unsigned int v4l2_format_block_height(const struct v4l2_format_info *info, int plane) { if (!info->block_h[plane]) return 1; return info->block_h[plane]; } void v4l2_apply_frmsize_constraints(u32 *width, u32 *height, const struct v4l2_frmsize_stepwise *frmsize) { if (!frmsize) return; /* * Clamp width/height to meet min/max constraints and round it up to * macroblock alignment. */ *width = clamp_roundup(*width, frmsize->min_width, frmsize->max_width, frmsize->step_width); *height = clamp_roundup(*height, frmsize->min_height, frmsize->max_height, frmsize->step_height); } EXPORT_SYMBOL_GPL(v4l2_apply_frmsize_constraints); int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat, u32 width, u32 height) { const struct v4l2_format_info *info; struct v4l2_plane_pix_format *plane; int i; info = v4l2_format_info(pixelformat); if (!info) return -EINVAL; pixfmt->width = width; pixfmt->height = height; pixfmt->pixelformat = pixelformat; pixfmt->num_planes = info->mem_planes; if (info->mem_planes == 1) { plane = &pixfmt->plane_fmt[0]; plane->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0]; plane->sizeimage = 0; for (i = 0; i < info->comp_planes; i++) { unsigned int hdiv = (i == 0) ? 1 : info->hdiv; unsigned int vdiv = (i == 0) ? 1 : info->vdiv; unsigned int aligned_width; unsigned int aligned_height; aligned_width = ALIGN(width, v4l2_format_block_width(info, i)); aligned_height = ALIGN(height, v4l2_format_block_height(info, i)); plane->sizeimage += info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) * DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i]; } } else { for (i = 0; i < info->comp_planes; i++) { unsigned int hdiv = (i == 0) ? 1 : info->hdiv; unsigned int vdiv = (i == 0) ? 1 : info->vdiv; unsigned int aligned_width; unsigned int aligned_height; aligned_width = ALIGN(width, v4l2_format_block_width(info, i)); aligned_height = ALIGN(height, v4l2_format_block_height(info, i)); plane = &pixfmt->plane_fmt[i]; plane->bytesperline = info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) / info->bpp_div[i]; plane->sizeimage = plane->bytesperline * DIV_ROUND_UP(aligned_height, vdiv); } } return 0; } EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt_mp); int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, u32 width, u32 height) { const struct v4l2_format_info *info; int i; info = v4l2_format_info(pixelformat); if (!info) return -EINVAL; /* Single planar API cannot be used for multi plane formats. */ if (info->mem_planes > 1) return -EINVAL; pixfmt->width = width; pixfmt->height = height; pixfmt->pixelformat = pixelformat; pixfmt->bytesperline = ALIGN(width, v4l2_format_block_width(info, 0)) * info->bpp[0] / info->bpp_div[0]; pixfmt->sizeimage = 0; for (i = 0; i < info->comp_planes; i++) { unsigned int hdiv = (i == 0) ? 1 : info->hdiv; unsigned int vdiv = (i == 0) ? 
1 : info->vdiv; unsigned int aligned_width; unsigned int aligned_height; aligned_width = ALIGN(width, v4l2_format_block_width(info, i)); aligned_height = ALIGN(height, v4l2_format_block_height(info, i)); pixfmt->sizeimage += info->bpp[i] * DIV_ROUND_UP(aligned_width, hdiv) * DIV_ROUND_UP(aligned_height, vdiv) / info->bpp_div[i]; } return 0; } EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt); s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, unsigned int div) { struct v4l2_ctrl *ctrl; s64 freq; ctrl = v4l2_ctrl_find(handler, V4L2_CID_LINK_FREQ); if (ctrl) { struct v4l2_querymenu qm = { .id = V4L2_CID_LINK_FREQ }; int ret; qm.index = v4l2_ctrl_g_ctrl(ctrl); ret = v4l2_querymenu(handler, &qm); if (ret) return -ENOENT; freq = qm.value; } else { if (!mul || !div) return -ENOENT; ctrl = v4l2_ctrl_find(handler, V4L2_CID_PIXEL_RATE); if (!ctrl) return -ENOENT; freq = div_u64(v4l2_ctrl_g_ctrl_int64(ctrl) * mul, div); pr_warn("%s: Link frequency estimated using pixel rate: result might be inaccurate\n", __func__); pr_warn("%s: Consider implementing support for V4L2_CID_LINK_FREQ in the transmitter driver\n", __func__); } return freq > 0 ? freq : -EINVAL; } EXPORT_SYMBOL_GPL(v4l2_get_link_freq); /* * Simplify a fraction using a simple continued fraction decomposition. The * idea here is to convert fractions such as 333333/10000000 to 1/30 using * 32 bit arithmetic only. The algorithm is not perfect and relies upon two * arbitrary parameters to remove non-significant terms from the simple * continued fraction decomposition. Using 8 and 333 for n_terms and threshold * respectively seems to give nice results. */ void v4l2_simplify_fraction(u32 *numerator, u32 *denominator, unsigned int n_terms, unsigned int threshold) { u32 *an; u32 x, y, r; unsigned int i, n; an = kmalloc_array(n_terms, sizeof(*an), GFP_KERNEL); if (an == NULL) return; /* * Convert the fraction to a simple continued fraction. See * https://en.wikipedia.org/wiki/Continued_fraction * Stop if the current term is bigger than or equal to the given * threshold. */ x = *numerator; y = *denominator; for (n = 0; n < n_terms && y != 0; ++n) { an[n] = x / y; if (an[n] >= threshold) { if (n < 2) n++; break; } r = x - an[n] * y; x = y; y = r; } /* Expand the simple continued fraction back to an integer fraction. */ x = 0; y = 1; for (i = n; i > 0; --i) { r = y; y = an[i-1] * y + x; x = r; } *numerator = y; *denominator = x; kfree(an); } EXPORT_SYMBOL_GPL(v4l2_simplify_fraction); /* * Convert a fraction to a frame interval in 100ns multiples. The idea here is * to compute numerator / denominator * 10000000 using 32 bit fixed point * arithmetic only. */ u32 v4l2_fraction_to_interval(u32 numerator, u32 denominator) { u32 multiplier; /* Saturate the result if the operation would overflow. */ if (denominator == 0 || numerator/denominator >= ((u32)-1)/10000000) return (u32)-1; /* * Divide both the denominator and the multiplier by two until * numerator * multiplier doesn't overflow. If anyone knows a better * algorithm please let me know. */ multiplier = 10000000; while (numerator > ((u32)-1)/multiplier) { multiplier /= 2; denominator /= 2; } return denominator ?
numerator * multiplier / denominator : 0; } EXPORT_SYMBOL_GPL(v4l2_fraction_to_interval); int v4l2_link_freq_to_bitmap(struct device *dev, const u64 *fw_link_freqs, unsigned int num_of_fw_link_freqs, const s64 *driver_link_freqs, unsigned int num_of_driver_link_freqs, unsigned long *bitmap) { unsigned int i; *bitmap = 0; if (!num_of_fw_link_freqs) { dev_err(dev, "no link frequencies in firmware\n"); return -ENODATA; } for (i = 0; i < num_of_fw_link_freqs; i++) { unsigned int j; for (j = 0; j < num_of_driver_link_freqs; j++) { if (fw_link_freqs[i] != driver_link_freqs[j]) continue; dev_dbg(dev, "enabling link frequency %lld Hz\n", driver_link_freqs[j]); *bitmap |= BIT(j); break; } } if (!*bitmap) { dev_err(dev, "no matching link frequencies found\n"); dev_dbg(dev, "specified in firmware:\n"); for (i = 0; i < num_of_fw_link_freqs; i++) dev_dbg(dev, "\t%llu Hz\n", fw_link_freqs[i]); dev_dbg(dev, "driver supported:\n"); for (i = 0; i < num_of_driver_link_freqs; i++) dev_dbg(dev, "\t%lld Hz\n", driver_link_freqs[i]); return -ENOENT; } return 0; } EXPORT_SYMBOL_GPL(v4l2_link_freq_to_bitmap);
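/*
 * Editor's note -- illustrative sketch, not part of v4l2-common.c: the plane
 * arithmetic in v4l2_fill_pixfmt() above can be sanity-checked by hand. For
 * V4L2_PIX_FMT_NV12 at 1920x1080 (bpp = {1, 2}, hdiv = vdiv = 2, bpp_div = 1,
 * per the table above), the Y plane contributes 1 * 1920 * 1080 = 2073600
 * bytes and the interleaved CbCr plane 2 * (1920/2) * (1080/2) = 1036800
 * bytes, i.e. 12 bits per pixel overall:
 */
static void example_nv12_size(void)
{
	struct v4l2_pix_format fmt;

	if (v4l2_fill_pixfmt(&fmt, V4L2_PIX_FMT_NV12, 1920, 1080))
		return;
	/* fmt.bytesperline == 1920, fmt.sizeimage == 3110400 (1920*1080*3/2) */
}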
// SPDX-License-Identifier: GPL-2.0-or-later /* * Abilis Systems Single DVB-T Receiver * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com> * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com> */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/firmware.h> #include "as102_drv.h" #include "as102_fw.h" static const char as102_st_fw1[] = "as102_data1_st.hex"; static const char as102_st_fw2[] = "as102_data2_st.hex"; static const char as102_dt_fw1[] = "as102_data1_dt.hex"; static const char as102_dt_fw2[] = "as102_data2_dt.hex"; static unsigned char atohx(unsigned char *dst, char *src) { unsigned char value = 0; char msb = tolower(*src) - '0'; char lsb = tolower(*(src + 1)) - '0'; if (msb > 9) msb -= 7; if (lsb > 9) lsb -= 7; *dst = value = ((msb & 0xF) << 4) | (lsb & 0xF); return value; } /* * Parse INTEL HEX firmware file to extract address and data.
*/ static int parse_hex_line(unsigned char *fw_data, unsigned char *addr, unsigned char *data, int *dataLength, unsigned char *addr_has_changed) { int count = 0; unsigned char *src, dst; if (*fw_data++ != ':') { pr_err("invalid firmware file\n"); return -EFAULT; } /* locate end of line */ for (src = fw_data; *src != '\n'; src += 2) { atohx(&dst, src); /* parse line to split addr / data */ switch (count) { case 0: *dataLength = dst; break; case 1: addr[2] = dst; break; case 2: addr[3] = dst; break; case 3: /* check if data is an address */ if (dst == 0x04) *addr_has_changed = 1; else *addr_has_changed = 0; break; case 4: case 5: if (*addr_has_changed) addr[(count - 4)] = dst; else data[(count - 4)] = dst; break; default: data[(count - 4)] = dst; break; } count++; } /* return read value + ':' + '\n' */ return (count * 2) + 2; } static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, unsigned char *cmd, const struct firmware *firmware) { struct as10x_fw_pkt_t *fw_pkt; int total_read_bytes = 0, errno = 0; unsigned char addr_has_changed = 0; fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL); if (!fw_pkt) return -ENOMEM; for (total_read_bytes = 0; total_read_bytes < firmware->size; ) { int read_bytes = 0, data_len = 0; /* parse intel hex line */ read_bytes = parse_hex_line( (u8 *) (firmware->data + total_read_bytes), fw_pkt->raw.address, fw_pkt->raw.data, &data_len, &addr_has_changed); if (read_bytes <= 0) goto error; /* detect the end of file */ total_read_bytes += read_bytes; if (total_read_bytes == firmware->size) { fw_pkt->u.request[0] = 0x00; fw_pkt->u.request[1] = 0x03; /* send EOF command */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) fw_pkt, 2, 0); if (errno < 0) goto error; } else { if (!addr_has_changed) { /* prepare command to send */ fw_pkt->u.request[0] = 0x00; fw_pkt->u.request[1] = 0x01; data_len += sizeof(fw_pkt->u.request); data_len += sizeof(fw_pkt->raw.address); /* send cmd to device */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) fw_pkt, data_len, 0); if (errno < 0) goto error; } } } error: kfree(fw_pkt); return (errno == 0) ? 
total_read_bytes : errno; } int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap) { int errno = -EFAULT; const struct firmware *firmware = NULL; unsigned char *cmd_buf = NULL; const char *fw1, *fw2; struct usb_device *dev = bus_adap->usb_dev; /* select fw file to upload */ if (dual_tuner) { fw1 = as102_dt_fw1; fw2 = as102_dt_fw2; } else { fw1 = as102_st_fw1; fw2 = as102_st_fw2; } /* allocate buffer to store firmware upload command and data */ cmd_buf = kzalloc(MAX_FW_PKT_SIZE, GFP_KERNEL); if (cmd_buf == NULL) { errno = -ENOMEM; goto error; } /* request kernel to locate firmware file: part1 */ errno = request_firmware(&firmware, fw1, &dev->dev); if (errno < 0) { pr_err("%s: unable to locate firmware file: %s\n", DRIVER_NAME, fw1); goto error; } /* initiate firmware upload */ errno = as102_firmware_upload(bus_adap, cmd_buf, firmware); if (errno < 0) { pr_err("%s: error during firmware upload part1\n", DRIVER_NAME); goto error; } pr_info("%s: firmware: %s loaded successfully\n", DRIVER_NAME, fw1); release_firmware(firmware); firmware = NULL; /* wait for boot to complete */ mdelay(100); /* request kernel to locate firmware file: part2 */ errno = request_firmware(&firmware, fw2, &dev->dev); if (errno < 0) { pr_err("%s: unable to locate firmware file: %s\n", DRIVER_NAME, fw2); goto error; } /* initiate firmware upload */ errno = as102_firmware_upload(bus_adap, cmd_buf, firmware); if (errno < 0) { pr_err("%s: error during firmware upload part2\n", DRIVER_NAME); goto error; } pr_info("%s: firmware: %s loaded successfully\n", DRIVER_NAME, fw2); error: kfree(cmd_buf); release_firmware(firmware); return errno; }
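/*
 * Editor's note -- illustrative sketch, not part of as102_fw.c: the records
 * consumed by parse_hex_line() above are standard Intel HEX lines of the form
 * ":llaaaatt<data>cc". A self-contained userspace parser for one record (the
 * sample line is arbitrary) shows the framing and the checksum rule: all
 * bytes of the record, checksum included, must sum to 0 modulo 256.
 */
#include <stdio.h>
#include <stdlib.h>

static int hexbyte(const char *s)
{
	char buf[3] = { s[0], s[1], '\0' };

	return (int)strtol(buf, NULL, 16);
}

int main(void)
{
	const char *line = ":0300300002337A1E"; /* 3 data bytes @ 0x0030 */
	int len, addr, type, sum, csum, i;

	if (line[0] != ':')
		return 1;
	len  = hexbyte(line + 1);
	addr = (hexbyte(line + 3) << 8) | hexbyte(line + 5);
	type = hexbyte(line + 7);
	sum  = len + (addr >> 8) + (addr & 0xff) + type;

	printf("len=%d addr=0x%04x type=%d data:", len, addr, type);
	for (i = 0; i < len; i++) {
		int b = hexbyte(line + 9 + 2 * i);

		printf(" %02x", b);
		sum += b;
	}
	csum = hexbyte(line + 9 + 2 * len);
	printf("\nchecksum %s\n", ((sum + csum) & 0xff) == 0 ? "ok" : "BAD");
	return 0;
}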
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2006, Johannes Berg <johannes@sipsolutions.net> */ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/leds.h> #include "ieee80211_i.h" #define MAC80211_BLINK_DELAY 50 /* ms */ static inline void ieee80211_led_rx(struct ieee80211_local *local) { #ifdef CONFIG_MAC80211_LEDS if (!atomic_read(&local->rx_led_active)) return; led_trigger_blink_oneshot(&local->rx_led, MAC80211_BLINK_DELAY, MAC80211_BLINK_DELAY, 0); #endif } static inline void ieee80211_led_tx(struct ieee80211_local *local) { #ifdef CONFIG_MAC80211_LEDS if (!atomic_read(&local->tx_led_active)) return; led_trigger_blink_oneshot(&local->tx_led, MAC80211_BLINK_DELAY, MAC80211_BLINK_DELAY, 0); #endif } #ifdef CONFIG_MAC80211_LEDS void ieee80211_led_assoc(struct ieee80211_local *local, bool associated); void ieee80211_led_radio(struct ieee80211_local *local, bool enabled); void ieee80211_alloc_led_names(struct ieee80211_local *local); void ieee80211_free_led_names(struct ieee80211_local *local); void ieee80211_led_init(struct ieee80211_local *local); void ieee80211_led_exit(struct ieee80211_local *local); void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, unsigned int types_on, unsigned int types_off); #else static inline void ieee80211_led_assoc(struct ieee80211_local *local, bool associated) { } static inline void ieee80211_led_radio(struct ieee80211_local *local, bool enabled) { } static inline void ieee80211_alloc_led_names(struct ieee80211_local *local) { } static inline void ieee80211_free_led_names(struct ieee80211_local *local) { } static inline void ieee80211_led_init(struct ieee80211_local *local) { } static inline void ieee80211_led_exit(struct ieee80211_local *local) { } static inline void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local, unsigned int types_on, unsigned int types_off) { } #endif static inline void ieee80211_tpt_led_trig_tx(struct ieee80211_local *local, int bytes) { #ifdef CONFIG_MAC80211_LEDS if (atomic_read(&local->tpt_led_active)) local->tpt_led_trigger->tx_bytes += bytes; #endif } static inline void ieee80211_tpt_led_trig_rx(struct ieee80211_local *local, int bytes) { #ifdef CONFIG_MAC80211_LEDS if (atomic_read(&local->tpt_led_active)) local->tpt_led_trigger->rx_bytes += bytes; #endif }
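/*
 * Editor's note -- illustrative sketch, not part of led.h: the header above
 * uses the common kernel idiom of pairing real declarations with empty static
 * inline stubs, so callers may invoke the hooks unconditionally and the calls
 * compile away when the option is disabled. The same shape, reduced to a
 * generic feature gate (CONFIG_FOO and foo_event() are hypothetical names):
 */
#ifdef CONFIG_FOO
void foo_event(int arg);			/* real implementation elsewhere */
#else
static inline void foo_event(int arg) { }	/* compiles to nothing when off */
#endif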
// SPDX-License-Identifier: GPL-2.0 /* * linux/fs/ext4/bitmap.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) */ #include <linux/buffer_head.h> #include "ext4.h" unsigned int ext4_count_free(char *bitmap, unsigned int numchars) { return numchars * BITS_PER_BYTE - memweight(bitmap, numchars); } int ext4_inode_bitmap_csum_verify(struct super_block *sb, struct ext4_group_desc *gdp, struct buffer_head *bh) { __u32 hi; __u32 provided, calculated; struct ext4_sb_info *sbi = EXT4_SB(sb); int sz; if (!ext4_has_metadata_csum(sb)) return 1; sz = EXT4_INODES_PER_GROUP(sb) >> 3; provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo); calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) { hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi); provided |= (hi << 16); } else calculated &= 0xFFFF; return provided == calculated; } void ext4_inode_bitmap_csum_set(struct super_block *sb, struct ext4_group_desc *gdp, struct buffer_head *bh) { __u32 csum; struct ext4_sb_info *sbi = EXT4_SB(sb); int sz; if (!ext4_has_metadata_csum(sb)) return; sz = EXT4_INODES_PER_GROUP(sb) >> 3; csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF); if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16); } int ext4_block_bitmap_csum_verify(struct super_block *sb, struct ext4_group_desc *gdp, struct buffer_head *bh) { __u32 hi; __u32 provided, calculated; struct ext4_sb_info *sbi = EXT4_SB(sb); int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8; if (!ext4_has_metadata_csum(sb)) return 1; provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo); calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) { hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi); provided |= (hi << 16); } else calculated &= 0xFFFF; return provided == calculated; } void ext4_block_bitmap_csum_set(struct super_block *sb, struct ext4_group_desc *gdp, struct buffer_head *bh) { int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8; __u32 csum; struct ext4_sb_info *sbi = EXT4_SB(sb); if (!ext4_has_metadata_csum(sb)) return; csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF); if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16); }
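/*
 * Editor's note -- illustrative sketch, not part of ext4/bitmap.c: the four
 * helpers above all split one 32-bit checksum across a 16-bit _lo field and
 * an optional 16-bit _hi field, depending on whether the group descriptor is
 * large enough to hold the high half. A standalone model of that split and of
 * the verify-time reassembly (struct and names here are invented):
 */
#include <stdbool.h>
#include <stdint.h>

struct gd_csum { uint16_t lo, hi; };

static void csum_store(struct gd_csum *gd, uint32_t csum, bool have_hi)
{
	gd->lo = csum & 0xFFFF;
	if (have_hi)
		gd->hi = csum >> 16;
}

static bool csum_check(const struct gd_csum *gd, uint32_t calculated,
		       bool have_hi)
{
	uint32_t provided = gd->lo;

	if (have_hi)
		provided |= (uint32_t)gd->hi << 16;
	else
		calculated &= 0xFFFF;	/* only the low half was stored */

	return provided == calculated;
}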
/* * DRBG: Deterministic Random Bits Generator * Based on NIST Recommended DRBG from NIST SP800-90A with the following * properties: * * CTR DRBG with DF with AES-128, AES-192, AES-256 cores * * Hash DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores * * HMAC DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores * * with and without prediction resistance * * Copyright Stephan Mueller <smueller@chronox.de>, 2014 * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions
* are met: * 1. Redistributions of source code must retain the above copyright * notice, and the entire permission notice in its entirety, * including the disclaimer of warranties. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * ALTERNATIVELY, this product may be distributed under the terms of * the GNU General Public License, in which case the provisions of the GPL are * required INSTEAD OF the above restrictions. (This clause is * necessary due to a potential bad interaction between the GPL and * the restrictions contained in a BSD-style copyright.) * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * * DRBG Usage * ========== * The SP 800-90A DRBG allows the user to specify a personalization string * for initialization as well as an additional information string for each * random number request. The following code fragments show how a caller * uses the kernel crypto API to use the full functionality of the DRBG. * * Usage without any additional data * --------------------------------- * struct crypto_rng *drng; * int err; * char data[DATALEN]; * * drng = crypto_alloc_rng(drng_name, 0, 0); * err = crypto_rng_get_bytes(drng, &data, DATALEN); * crypto_free_rng(drng); * * * Usage with personalization string during initialization * ------------------------------------------------------- * struct crypto_rng *drng; * int err; * char data[DATALEN]; * struct drbg_string pers; * char personalization[11] = "some-string"; * * drbg_string_fill(&pers, personalization, strlen(personalization)); * drng = crypto_alloc_rng(drng_name, 0, 0); * // The reset completely re-initializes the DRBG with the provided * // personalization string * err = crypto_rng_reset(drng, &personalization, strlen(personalization)); * err = crypto_rng_get_bytes(drng, &data, DATALEN); * crypto_free_rng(drng); * * * Usage with additional information string during random number request * --------------------------------------------------------------------- * struct crypto_rng *drng; * int err; * char data[DATALEN]; * char addtl_string[11] = "some-string"; * struct drbg_string addtl; * * drbg_string_fill(&addtl, addtl_string, strlen(addtl_string)); * drng = crypto_alloc_rng(drng_name, 0, 0); * // The following call is a wrapper to crypto_rng_get_bytes() and returns * // the same error codes. * err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl); * crypto_free_rng(drng); * * * Usage with personalization and additional information strings * ------------------------------------------------------------- * Just mix both scenarios above.
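 *
 * A possible sketch of that combination (reusing only the calls already
 * shown in the fragments above; DATALEN and drng_name remain placeholders):
 *	struct crypto_rng *drng;
 *	int err;
 *	char data[DATALEN];
 *	struct drbg_string addtl;
 *	char personalization[11] = "some-string";
 *	char addtl_string[11] = "some-string";
 *
 *	drng = crypto_alloc_rng(drng_name, 0, 0);
 *	// re-initialize the DRBG with the personalization string
 *	err = crypto_rng_reset(drng, &personalization,
 *			       strlen(personalization));
 *	// mix the additional information string into every request
 *	drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
 *	err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl);
 *	crypto_free_rng(drng);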
*/ #include <crypto/drbg.h> #include <crypto/internal/cipher.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/string_choices.h> /*************************************************************** * Backend cipher definitions available to DRBG ***************************************************************/ /* * The order of the DRBG definitions here matter: every DRBG is registered * as stdrng. Each DRBG receives an increasing cra_priority values the later * they are defined in this array (see drbg_fill_array). * * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and the * HMAC-SHA512 / SHA256 / AES 256 over other ciphers. Thus, the * favored DRBGs are the latest entries in this array. */ static const struct drbg_core drbg_cores[] = { #ifdef CONFIG_CRYPTO_DRBG_CTR { .flags = DRBG_CTR | DRBG_STRENGTH128, .statelen = 32, /* 256 bits as defined in 10.2.1 */ .blocklen_bytes = 16, .cra_name = "ctr_aes128", .backend_cra_name = "aes", }, { .flags = DRBG_CTR | DRBG_STRENGTH192, .statelen = 40, /* 320 bits as defined in 10.2.1 */ .blocklen_bytes = 16, .cra_name = "ctr_aes192", .backend_cra_name = "aes", }, { .flags = DRBG_CTR | DRBG_STRENGTH256, .statelen = 48, /* 384 bits as defined in 10.2.1 */ .blocklen_bytes = 16, .cra_name = "ctr_aes256", .backend_cra_name = "aes", }, #endif /* CONFIG_CRYPTO_DRBG_CTR */ #ifdef CONFIG_CRYPTO_DRBG_HASH { .flags = DRBG_HASH | DRBG_STRENGTH256, .statelen = 111, /* 888 bits */ .blocklen_bytes = 48, .cra_name = "sha384", .backend_cra_name = "sha384", }, { .flags = DRBG_HASH | DRBG_STRENGTH256, .statelen = 111, /* 888 bits */ .blocklen_bytes = 64, .cra_name = "sha512", .backend_cra_name = "sha512", }, { .flags = DRBG_HASH | DRBG_STRENGTH256, .statelen = 55, /* 440 bits */ .blocklen_bytes = 32, .cra_name = "sha256", .backend_cra_name = "sha256", }, #endif /* CONFIG_CRYPTO_DRBG_HASH */ #ifdef CONFIG_CRYPTO_DRBG_HMAC { .flags = DRBG_HMAC | DRBG_STRENGTH256, .statelen = 48, /* block length of cipher */ .blocklen_bytes = 48, .cra_name = "hmac_sha384", .backend_cra_name = "hmac(sha384)", }, { .flags = DRBG_HMAC | DRBG_STRENGTH256, .statelen = 32, /* block length of cipher */ .blocklen_bytes = 32, .cra_name = "hmac_sha256", .backend_cra_name = "hmac(sha256)", }, { .flags = DRBG_HMAC | DRBG_STRENGTH256, .statelen = 64, /* block length of cipher */ .blocklen_bytes = 64, .cra_name = "hmac_sha512", .backend_cra_name = "hmac(sha512)", }, #endif /* CONFIG_CRYPTO_DRBG_HMAC */ }; static int drbg_uninstantiate(struct drbg_state *drbg); /****************************************************************** * Generic helper functions ******************************************************************/ /* * Return strength of DRBG according to SP800-90A section 8.4 * * @flags DRBG flags reference * * Return: normalized strength in *bytes* value or 32 as default * to counter programming errors */ static inline unsigned short drbg_sec_strength(drbg_flag_t flags) { switch (flags & DRBG_STRENGTH_MASK) { case DRBG_STRENGTH128: return 16; case DRBG_STRENGTH192: return 24; case DRBG_STRENGTH256: return 32; default: return 32; } } /* * FIPS 140-2 continuous self test for the noise source * The test is performed on the noise source input data. Thus, the function * implicitly knows the size of the buffer to be equal to the security * strength. * * Note, this function disregards the nonce trailing the entropy data during * initial seeding. * * drbg->drbg_mutex must have been taken. 
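 * As an illustration (not normative FIPS wording): for a DRBG with 256-bit
 * security strength, entropylen is 32, so two consecutive 32-byte noise
 * source reads that compare equal are treated as a stuck noise source and
 * trigger the panic below.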
* * @drbg DRBG handle * @entropy buffer of seed data to be checked * * return: * 0 on success * -EAGAIN on when the CTRNG is not yet primed * < 0 on error */ static int drbg_fips_continuous_test(struct drbg_state *drbg, const unsigned char *entropy) { unsigned short entropylen = drbg_sec_strength(drbg->core->flags); int ret = 0; if (!IS_ENABLED(CONFIG_CRYPTO_FIPS)) return 0; /* skip test if we test the overall system */ if (list_empty(&drbg->test_data.list)) return 0; /* only perform test in FIPS mode */ if (!fips_enabled) return 0; if (!drbg->fips_primed) { /* Priming of FIPS test */ memcpy(drbg->prev, entropy, entropylen); drbg->fips_primed = true; /* priming: another round is needed */ return -EAGAIN; } ret = memcmp(drbg->prev, entropy, entropylen); if (!ret) panic("DRBG continuous self test failed\n"); memcpy(drbg->prev, entropy, entropylen); /* the test shall pass when the two values are not equal */ return 0; } /* * Convert an integer into a byte representation of this integer. * The byte representation is big-endian * * @val value to be converted * @buf buffer holding the converted integer -- caller must ensure that * buffer size is at least 32 bit */ #if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR)) static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf) { struct s { __be32 conv; }; struct s *conversion = (struct s *) buf; conversion->conv = cpu_to_be32(val); } #endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */ /****************************************************************** * CTR DRBG callback functions ******************************************************************/ #ifdef CONFIG_CRYPTO_DRBG_CTR #define CRYPTO_DRBG_CTR_STRING "CTR " MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes256"); MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes256"); MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes192"); MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192"); MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128"); MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128"); static void drbg_kcapi_symsetkey(struct drbg_state *drbg, const unsigned char *key); static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, const struct drbg_string *in); static int drbg_init_sym_kernel(struct drbg_state *drbg); static int drbg_fini_sym_kernel(struct drbg_state *drbg); static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *inbuf, u32 inbuflen, u8 *outbuf, u32 outlen); #define DRBG_OUTSCRATCHLEN 256 /* BCC function for CTR DRBG as defined in 10.4.3 */ static int drbg_ctr_bcc(struct drbg_state *drbg, unsigned char *out, const unsigned char *key, struct list_head *in) { int ret = 0; struct drbg_string *curr = NULL; struct drbg_string data; short cnt = 0; drbg_string_fill(&data, out, drbg_blocklen(drbg)); /* 10.4.3 step 2 / 4 */ drbg_kcapi_symsetkey(drbg, key); list_for_each_entry(curr, in, list) { const unsigned char *pos = curr->buf; size_t len = curr->len; /* 10.4.3 step 4.1 */ while (len) { /* 10.4.3 step 4.2 */ if (drbg_blocklen(drbg) == cnt) { cnt = 0; ret = drbg_kcapi_sym(drbg, out, &data); if (ret) return ret; } out[cnt] ^= *pos; pos++; cnt++; len--; } } /* 10.4.3 step 4.2 for last block */ if (cnt) ret = drbg_kcapi_sym(drbg, out, &data); return ret; } /* * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df * (and drbg_ctr_bcc, but this function does not need any temporary buffers), * the scratchpad is used as follows: * drbg_ctr_update: * temp * start: drbg->scratchpad * length: drbg_statelen(drbg) + drbg_blocklen(drbg) * note: the cipher writing into this 
variable works * blocklen-wise. Now, when the statelen is not a multiple * of blocklen, the generateion loop below "spills over" * by at most blocklen. Thus, we need to give sufficient * memory. * df_data * start: drbg->scratchpad + * drbg_statelen(drbg) + drbg_blocklen(drbg) * length: drbg_statelen(drbg) * * drbg_ctr_df: * pad * start: df_data + drbg_statelen(drbg) * length: drbg_blocklen(drbg) * iv * start: pad + drbg_blocklen(drbg) * length: drbg_blocklen(drbg) * temp * start: iv + drbg_blocklen(drbg) * length: drbg_satelen(drbg) + drbg_blocklen(drbg) * note: temp is the buffer that the BCC function operates * on. BCC operates blockwise. drbg_statelen(drbg) * is sufficient when the DRBG state length is a multiple * of the block size. For AES192 (and maybe other ciphers) * this is not correct and the length for temp is * insufficient (yes, that also means for such ciphers, * the final output of all BCC rounds are truncated). * Therefore, add drbg_blocklen(drbg) to cover all * possibilities. */ /* Derivation Function for CTR DRBG as defined in 10.4.2 */ static int drbg_ctr_df(struct drbg_state *drbg, unsigned char *df_data, size_t bytes_to_return, struct list_head *seedlist) { int ret = -EFAULT; unsigned char L_N[8]; /* S3 is input */ struct drbg_string S1, S2, S4, cipherin; LIST_HEAD(bcc_list); unsigned char *pad = df_data + drbg_statelen(drbg); unsigned char *iv = pad + drbg_blocklen(drbg); unsigned char *temp = iv + drbg_blocklen(drbg); size_t padlen = 0; unsigned int templen = 0; /* 10.4.2 step 7 */ unsigned int i = 0; /* 10.4.2 step 8 */ const unsigned char *K = (unsigned char *) "\x00\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"; unsigned char *X; size_t generated_len = 0; size_t inputlen = 0; struct drbg_string *seed = NULL; memset(pad, 0, drbg_blocklen(drbg)); memset(iv, 0, drbg_blocklen(drbg)); /* 10.4.2 step 1 is implicit as we work byte-wise */ /* 10.4.2 step 2 */ if ((512/8) < bytes_to_return) return -EINVAL; /* 10.4.2 step 2 -- calculate the entire length of all input data */ list_for_each_entry(seed, seedlist, list) inputlen += seed->len; drbg_cpu_to_be32(inputlen, &L_N[0]); /* 10.4.2 step 3 */ drbg_cpu_to_be32(bytes_to_return, &L_N[4]); /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */ padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg)); /* wrap the padlen appropriately */ if (padlen) padlen = drbg_blocklen(drbg) - padlen; /* * pad / padlen contains the 0x80 byte and the following zero bytes. * As the calculated padlen value only covers the number of zero * bytes, this value has to be incremented by one for the 0x80 byte. 
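 * As a worked example (illustrative only): with the 16-byte AES block
 * length and 48 bytes of seed input, (48 + 8 + 1) % 16 = 9, so padlen
 * becomes 16 - 9 = 7 zero bytes plus the 0x80 byte, i.e. 8 bytes of
 * padding and a total BCC input length of 64 bytes.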
*/ padlen++; pad[0] = 0x80; /* 10.4.2 step 4 -- first fill the linked list and then order it */ drbg_string_fill(&S1, iv, drbg_blocklen(drbg)); list_add_tail(&S1.list, &bcc_list); drbg_string_fill(&S2, L_N, sizeof(L_N)); list_add_tail(&S2.list, &bcc_list); list_splice_tail(seedlist, &bcc_list); drbg_string_fill(&S4, pad, padlen); list_add_tail(&S4.list, &bcc_list); /* 10.4.2 step 9 */ while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) { /* * 10.4.2 step 9.1 - the padding is implicit as the buffer * holds zeros after allocation -- even the increment of i * is irrelevant as the increment remains within length of i */ drbg_cpu_to_be32(i, iv); /* 10.4.2 step 9.2 -- BCC and concatenation with temp */ ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list); if (ret) goto out; /* 10.4.2 step 9.3 */ i++; templen += drbg_blocklen(drbg); } /* 10.4.2 step 11 */ X = temp + (drbg_keylen(drbg)); drbg_string_fill(&cipherin, X, drbg_blocklen(drbg)); /* 10.4.2 step 12: overwriting of outval is implemented in next step */ /* 10.4.2 step 13 */ drbg_kcapi_symsetkey(drbg, temp); while (generated_len < bytes_to_return) { short blocklen = 0; /* * 10.4.2 step 13.1: the truncation of the key length is * implicit as the key is only drbg_blocklen in size based on * the implementation of the cipher function callback */ ret = drbg_kcapi_sym(drbg, X, &cipherin); if (ret) goto out; blocklen = (drbg_blocklen(drbg) < (bytes_to_return - generated_len)) ? drbg_blocklen(drbg) : (bytes_to_return - generated_len); /* 10.4.2 step 13.2 and 14 */ memcpy(df_data + generated_len, X, blocklen); generated_len += blocklen; } ret = 0; out: memset(iv, 0, drbg_blocklen(drbg)); memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); memset(pad, 0, drbg_blocklen(drbg)); return ret; } /* * update function of CTR DRBG as defined in 10.2.1.2 * * The reseed variable has an enhanced meaning compared to the update * functions of the other DRBGs as follows: * 0 => initial seed from initialization * 1 => reseed via drbg_seed * 2 => first invocation from drbg_ctr_update when addtl is present. In * this case, the df_data scratchpad is not deleted so that it is * available for another calls to prevent calling the DF function * again. * 3 => second invocation from drbg_ctr_update. When the update function * was called with addtl, the df_data memory already contains the * DFed addtl information and we do not need to call DF again. */ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed, int reseed) { int ret = -EFAULT; /* 10.2.1.2 step 1 */ unsigned char *temp = drbg->scratchpad; unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) + drbg_blocklen(drbg); if (3 > reseed) memset(df_data, 0, drbg_statelen(drbg)); if (!reseed) { /* * The DRBG uses the CTR mode of the underlying AES cipher. The * CTR mode increments the counter value after the AES operation * but SP800-90A requires that the counter is incremented before * the AES operation. Hence, we increment it at the time we set * it by one. 
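 * As an illustrative note: SP800-90A expects "increment V, then encrypt",
 * while the CTR implementation encrypts with the current counter and only
 * then increments it; bumping V by one here makes the first CTR block
 * equal the V+1 block the specification expects.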
*/ crypto_inc(drbg->V, drbg_blocklen(drbg)); ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C, drbg_keylen(drbg)); if (ret) goto out; } /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */ if (seed) { ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed); if (ret) goto out; } ret = drbg_kcapi_sym_ctr(drbg, df_data, drbg_statelen(drbg), temp, drbg_statelen(drbg)); if (ret) return ret; /* 10.2.1.2 step 5 */ ret = crypto_skcipher_setkey(drbg->ctr_handle, temp, drbg_keylen(drbg)); if (ret) goto out; /* 10.2.1.2 step 6 */ memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg)); /* See above: increment counter by one to compensate timing of CTR op */ crypto_inc(drbg->V, drbg_blocklen(drbg)); ret = 0; out: memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg)); if (2 != reseed) memset(df_data, 0, drbg_statelen(drbg)); return ret; } /* * scratchpad use: drbg_ctr_update is called independently from * drbg_ctr_extract_bytes. Therefore, the scratchpad is reused */ /* Generate function of CTR DRBG as defined in 10.2.1.5.2 */ static int drbg_ctr_generate(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct list_head *addtl) { int ret; int len = min_t(int, buflen, INT_MAX); /* 10.2.1.5.2 step 2 */ if (addtl && !list_empty(addtl)) { ret = drbg_ctr_update(drbg, addtl, 2); if (ret) return 0; } /* 10.2.1.5.2 step 4.1 */ ret = drbg_kcapi_sym_ctr(drbg, NULL, 0, buf, len); if (ret) return ret; /* 10.2.1.5.2 step 6 */ ret = drbg_ctr_update(drbg, NULL, 3); if (ret) len = ret; return len; } static const struct drbg_state_ops drbg_ctr_ops = { .update = drbg_ctr_update, .generate = drbg_ctr_generate, .crypto_init = drbg_init_sym_kernel, .crypto_fini = drbg_fini_sym_kernel, }; #endif /* CONFIG_CRYPTO_DRBG_CTR */ /****************************************************************** * HMAC DRBG callback functions ******************************************************************/ #if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC) static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, const struct list_head *in); static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, const unsigned char *key); static int drbg_init_hash_kernel(struct drbg_state *drbg); static int drbg_fini_hash_kernel(struct drbg_state *drbg); #endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */ #ifdef CONFIG_CRYPTO_DRBG_HMAC #define CRYPTO_DRBG_HMAC_STRING "HMAC " MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha512"); MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha512"); MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384"); MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384"); MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256"); MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256"); /* update function of HMAC DRBG as defined in 10.1.2.2 */ static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed, int reseed) { int ret = -EFAULT; int i = 0; struct drbg_string seed1, seed2, vdata; LIST_HEAD(seedlist); LIST_HEAD(vdatalist); if (!reseed) { /* 10.1.2.3 step 2 -- memset(0) of C is implicit with kzalloc */ memset(drbg->V, 1, drbg_statelen(drbg)); drbg_kcapi_hmacsetkey(drbg, drbg->C); } drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg)); list_add_tail(&seed1.list, &seedlist); /* buffer of seed2 will be filled in for loop below with one byte */ drbg_string_fill(&seed2, NULL, 1); list_add_tail(&seed2.list, &seedlist); /* input data of seed is allowed to be NULL at this point */ if (seed) list_splice_tail(seed, &seedlist); drbg_string_fill(&vdata, drbg->V, drbg_statelen(drbg)); 
list_add_tail(&vdata.list, &vdatalist); for (i = 2; 0 < i; i--) { /* first round uses 0x0, second 0x1 */ unsigned char prefix = DRBG_PREFIX0; if (1 == i) prefix = DRBG_PREFIX1; /* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */ seed2.buf = &prefix; ret = drbg_kcapi_hash(drbg, drbg->C, &seedlist); if (ret) return ret; drbg_kcapi_hmacsetkey(drbg, drbg->C); /* 10.1.2.2 step 2 and 5 -- HMAC for V */ ret = drbg_kcapi_hash(drbg, drbg->V, &vdatalist); if (ret) return ret; /* 10.1.2.2 step 3 */ if (!seed) return ret; } return 0; } /* generate function of HMAC DRBG as defined in 10.1.2.5 */ static int drbg_hmac_generate(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct list_head *addtl) { int len = 0; int ret = 0; struct drbg_string data; LIST_HEAD(datalist); /* 10.1.2.5 step 2 */ if (addtl && !list_empty(addtl)) { ret = drbg_hmac_update(drbg, addtl, 1); if (ret) return ret; } drbg_string_fill(&data, drbg->V, drbg_statelen(drbg)); list_add_tail(&data.list, &datalist); while (len < buflen) { unsigned int outlen = 0; /* 10.1.2.5 step 4.1 */ ret = drbg_kcapi_hash(drbg, drbg->V, &datalist); if (ret) return ret; outlen = (drbg_blocklen(drbg) < (buflen - len)) ? drbg_blocklen(drbg) : (buflen - len); /* 10.1.2.5 step 4.2 */ memcpy(buf + len, drbg->V, outlen); len += outlen; } /* 10.1.2.5 step 6 */ if (addtl && !list_empty(addtl)) ret = drbg_hmac_update(drbg, addtl, 1); else ret = drbg_hmac_update(drbg, NULL, 1); if (ret) return ret; return len; } static const struct drbg_state_ops drbg_hmac_ops = { .update = drbg_hmac_update, .generate = drbg_hmac_generate, .crypto_init = drbg_init_hash_kernel, .crypto_fini = drbg_fini_hash_kernel, }; #endif /* CONFIG_CRYPTO_DRBG_HMAC */ /****************************************************************** * Hash DRBG callback functions ******************************************************************/ #ifdef CONFIG_CRYPTO_DRBG_HASH #define CRYPTO_DRBG_HASH_STRING "HASH " MODULE_ALIAS_CRYPTO("drbg_pr_sha512"); MODULE_ALIAS_CRYPTO("drbg_nopr_sha512"); MODULE_ALIAS_CRYPTO("drbg_pr_sha384"); MODULE_ALIAS_CRYPTO("drbg_nopr_sha384"); MODULE_ALIAS_CRYPTO("drbg_pr_sha256"); MODULE_ALIAS_CRYPTO("drbg_nopr_sha256"); /* * Increment buffer * * @dst buffer to increment * @add value to add */ static inline void drbg_add_buf(unsigned char *dst, size_t dstlen, const unsigned char *add, size_t addlen) { /* implied: dstlen > addlen */ unsigned char *dstptr; const unsigned char *addptr; unsigned int remainder = 0; size_t len = addlen; dstptr = dst + (dstlen-1); addptr = add + (addlen-1); while (len) { remainder += *dstptr + *addptr; *dstptr = remainder & 0xff; remainder >>= 8; len--; dstptr--; addptr--; } len = dstlen - addlen; while (len && remainder > 0) { remainder = *dstptr + 1; *dstptr = remainder & 0xff; remainder >>= 8; len--; dstptr--; } } /* * scratchpad usage: as drbg_hash_update and drbg_hash_df are used * interlinked, the scratchpad is used as follows: * drbg_hash_update * start: drbg->scratchpad * length: drbg_statelen(drbg) * drbg_hash_df: * start: drbg->scratchpad + drbg_statelen(drbg) * length: drbg_blocklen(drbg) * * drbg_hash_process_addtl uses the scratchpad, but fully completes * before either of the functions mentioned before are invoked. Therefore, * drbg_hash_process_addtl does not need to be specifically considered. 
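 *
 * Put differently (an illustrative summary of the layout above): the Hash
 * DRBG scratchpad spans drbg_statelen() + drbg_blocklen() bytes, laid out
 * as [ working V copy (statelen) | hash_df output block (blocklen) ].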
*/ /* Derivation Function for Hash DRBG as defined in 10.4.1 */ static int drbg_hash_df(struct drbg_state *drbg, unsigned char *outval, size_t outlen, struct list_head *entropylist) { int ret = 0; size_t len = 0; unsigned char input[5]; unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg); struct drbg_string data; /* 10.4.1 step 3 */ input[0] = 1; drbg_cpu_to_be32((outlen * 8), &input[1]); /* 10.4.1 step 4.1 -- concatenation of data for input into hash */ drbg_string_fill(&data, input, 5); list_add(&data.list, entropylist); /* 10.4.1 step 4 */ while (len < outlen) { short blocklen = 0; /* 10.4.1 step 4.1 */ ret = drbg_kcapi_hash(drbg, tmp, entropylist); if (ret) goto out; /* 10.4.1 step 4.2 */ input[0]++; blocklen = (drbg_blocklen(drbg) < (outlen - len)) ? drbg_blocklen(drbg) : (outlen - len); memcpy(outval + len, tmp, blocklen); len += blocklen; } out: memset(tmp, 0, drbg_blocklen(drbg)); return ret; } /* update function for Hash DRBG as defined in 10.1.1.2 / 10.1.1.3 */ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed, int reseed) { int ret = 0; struct drbg_string data1, data2; LIST_HEAD(datalist); LIST_HEAD(datalist2); unsigned char *V = drbg->scratchpad; unsigned char prefix = DRBG_PREFIX1; if (!seed) return -EINVAL; if (reseed) { /* 10.1.1.3 step 1 */ memcpy(V, drbg->V, drbg_statelen(drbg)); drbg_string_fill(&data1, &prefix, 1); list_add_tail(&data1.list, &datalist); drbg_string_fill(&data2, V, drbg_statelen(drbg)); list_add_tail(&data2.list, &datalist); } list_splice_tail(seed, &datalist); /* 10.1.1.2 / 10.1.1.3 step 2 and 3 */ ret = drbg_hash_df(drbg, drbg->V, drbg_statelen(drbg), &datalist); if (ret) goto out; /* 10.1.1.2 / 10.1.1.3 step 4 */ prefix = DRBG_PREFIX0; drbg_string_fill(&data1, &prefix, 1); list_add_tail(&data1.list, &datalist2); drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg)); list_add_tail(&data2.list, &datalist2); /* 10.1.1.2 / 10.1.1.3 step 4 */ ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2); out: memset(drbg->scratchpad, 0, drbg_statelen(drbg)); return ret; } /* processing of additional information string for Hash DRBG */ static int drbg_hash_process_addtl(struct drbg_state *drbg, struct list_head *addtl) { int ret = 0; struct drbg_string data1, data2; LIST_HEAD(datalist); unsigned char prefix = DRBG_PREFIX2; /* 10.1.1.4 step 2 */ if (!addtl || list_empty(addtl)) return 0; /* 10.1.1.4 step 2a */ drbg_string_fill(&data1, &prefix, 1); drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg)); list_add_tail(&data1.list, &datalist); list_add_tail(&data2.list, &datalist); list_splice_tail(addtl, &datalist); ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist); if (ret) goto out; /* 10.1.1.4 step 2b */ drbg_add_buf(drbg->V, drbg_statelen(drbg), drbg->scratchpad, drbg_blocklen(drbg)); out: memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); return ret; } /* Hashgen defined in 10.1.1.4 */ static int drbg_hash_hashgen(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen) { int len = 0; int ret = 0; unsigned char *src = drbg->scratchpad; unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg); struct drbg_string data; LIST_HEAD(datalist); /* 10.1.1.4 step hashgen 2 */ memcpy(src, drbg->V, drbg_statelen(drbg)); drbg_string_fill(&data, src, drbg_statelen(drbg)); list_add_tail(&data.list, &datalist); while (len < buflen) { unsigned int outlen = 0; /* 10.1.1.4 step hashgen 4.1 */ ret = drbg_kcapi_hash(drbg, dst, &datalist); if (ret) { len = ret; goto out; } outlen = (drbg_blocklen(drbg) < (buflen - len)) 
? drbg_blocklen(drbg) : (buflen - len); /* 10.1.1.4 step hashgen 4.2 */ memcpy(buf + len, dst, outlen); len += outlen; /* 10.1.1.4 hashgen step 4.3 */ if (len < buflen) crypto_inc(src, drbg_statelen(drbg)); } out: memset(drbg->scratchpad, 0, (drbg_statelen(drbg) + drbg_blocklen(drbg))); return len; } /* generate function for Hash DRBG as defined in 10.1.1.4 */ static int drbg_hash_generate(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct list_head *addtl) { int len = 0; int ret = 0; union { unsigned char req[8]; __be64 req_int; } u; unsigned char prefix = DRBG_PREFIX3; struct drbg_string data1, data2; LIST_HEAD(datalist); /* 10.1.1.4 step 2 */ ret = drbg_hash_process_addtl(drbg, addtl); if (ret) return ret; /* 10.1.1.4 step 3 */ len = drbg_hash_hashgen(drbg, buf, buflen); /* this is the value H as documented in 10.1.1.4 */ /* 10.1.1.4 step 4 */ drbg_string_fill(&data1, &prefix, 1); list_add_tail(&data1.list, &datalist); drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg)); list_add_tail(&data2.list, &datalist); ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist); if (ret) { len = ret; goto out; } /* 10.1.1.4 step 5 */ drbg_add_buf(drbg->V, drbg_statelen(drbg), drbg->scratchpad, drbg_blocklen(drbg)); drbg_add_buf(drbg->V, drbg_statelen(drbg), drbg->C, drbg_statelen(drbg)); u.req_int = cpu_to_be64(drbg->reseed_ctr); drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8); out: memset(drbg->scratchpad, 0, drbg_blocklen(drbg)); return len; } /* * scratchpad usage: as update and generate are used isolated, both * can use the scratchpad */ static const struct drbg_state_ops drbg_hash_ops = { .update = drbg_hash_update, .generate = drbg_hash_generate, .crypto_init = drbg_init_hash_kernel, .crypto_fini = drbg_fini_hash_kernel, }; #endif /* CONFIG_CRYPTO_DRBG_HASH */ /****************************************************************** * Functions common for DRBG implementations ******************************************************************/ static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed, int reseed, enum drbg_seed_state new_seed_state) { int ret = drbg->d_ops->update(drbg, seed, reseed); if (ret) return ret; drbg->seeded = new_seed_state; drbg->last_seed_time = jiffies; /* 10.1.1.2 / 10.1.1.3 step 5 */ drbg->reseed_ctr = 1; switch (drbg->seeded) { case DRBG_SEED_STATE_UNSEEDED: /* Impossible, but handle it to silence compiler warnings. */ fallthrough; case DRBG_SEED_STATE_PARTIAL: /* * Require frequent reseeds until the seed source is * fully initialized. */ drbg->reseed_threshold = 50; break; case DRBG_SEED_STATE_FULL: /* * Seed source has become fully initialized, frequent * reseeds no longer required. 
*/ drbg->reseed_threshold = drbg_max_requests(drbg); break; } return ret; } static inline int drbg_get_random_bytes(struct drbg_state *drbg, unsigned char *entropy, unsigned int entropylen) { int ret; do { get_random_bytes(entropy, entropylen); ret = drbg_fips_continuous_test(drbg, entropy); if (ret && ret != -EAGAIN) return ret; } while (ret); return 0; } static int drbg_seed_from_random(struct drbg_state *drbg) { struct drbg_string data; LIST_HEAD(seedlist); unsigned int entropylen = drbg_sec_strength(drbg->core->flags); unsigned char entropy[32]; int ret; BUG_ON(!entropylen); BUG_ON(entropylen > sizeof(entropy)); drbg_string_fill(&data, entropy, entropylen); list_add_tail(&data.list, &seedlist); ret = drbg_get_random_bytes(drbg, entropy, entropylen); if (ret) goto out; ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL); out: memzero_explicit(entropy, entropylen); return ret; } static bool drbg_nopr_reseed_interval_elapsed(struct drbg_state *drbg) { unsigned long next_reseed; /* Don't ever reseed from get_random_bytes() in test mode. */ if (list_empty(&drbg->test_data.list)) return false; /* * Obtain fresh entropy for the nopr DRBGs after 300s have * elapsed in order to still achieve sort of partial * prediction resistance over the time domain at least. Note * that the period of 300s has been chosen to match the * CRNG_RESEED_INTERVAL of the get_random_bytes()' chacha * rngs. */ next_reseed = drbg->last_seed_time + 300 * HZ; return time_after(jiffies, next_reseed); } /* * Seeding or reseeding of the DRBG * * @drbg: DRBG state struct * @pers: personalization / additional information buffer * @reseed: 0 for initial seed process, 1 for reseeding * * return: * 0 on success * error value otherwise */ static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers, bool reseed) { int ret; unsigned char entropy[((32 + 16) * 2)]; unsigned int entropylen = drbg_sec_strength(drbg->core->flags); struct drbg_string data1; LIST_HEAD(seedlist); enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL; /* 9.1 / 9.2 / 9.3.1 step 3 */ if (pers && pers->len > (drbg_max_addtl(drbg))) { pr_devel("DRBG: personalization string too long %zu\n", pers->len); return -EINVAL; } if (list_empty(&drbg->test_data.list)) { drbg_string_fill(&data1, drbg->test_data.buf, drbg->test_data.len); pr_devel("DRBG: using test entropy\n"); } else { /* * Gather entropy equal to the security strength of the DRBG. * With a derivation function, a nonce is required in addition * to the entropy. A nonce must be at least 1/2 of the security * strength of the DRBG in size. Thus, entropy + nonce is 3/2 * of the strength. The consideration of a nonce is only * applicable during initial seeding. */ BUG_ON(!entropylen); if (!reseed) entropylen = ((entropylen + 1) / 2) * 3; BUG_ON((entropylen * 2) > sizeof(entropy)); /* Get seed from in-kernel /dev/urandom */ if (!rng_is_initialized()) new_seed_state = DRBG_SEED_STATE_PARTIAL; ret = drbg_get_random_bytes(drbg, entropy, entropylen); if (ret) goto out; if (!drbg->jent) { drbg_string_fill(&data1, entropy, entropylen); pr_devel("DRBG: (re)seeding with %u bytes of entropy\n", entropylen); } else { /* * Get seed from Jitter RNG, failures are * fatal only in FIPS mode. */ ret = crypto_rng_get_bytes(drbg->jent, entropy + entropylen, entropylen); if (fips_enabled && ret) { pr_devel("DRBG: jent failed with %d\n", ret); /* * Do not treat the transient failure of the * Jitter RNG as an error that needs to be * reported. 
The combined number of the * maximum reseed threshold times the maximum * number of Jitter RNG transient errors is * less than the reseed threshold required by * SP800-90A allowing us to treat the * transient errors as such. * * However, we mandate that at least the first * seeding operation must succeed with the * Jitter RNG. */ if (!reseed || ret != -EAGAIN) goto out; } drbg_string_fill(&data1, entropy, entropylen * 2); pr_devel("DRBG: (re)seeding with %u bytes of entropy\n", entropylen * 2); } } list_add_tail(&data1.list, &seedlist); /* * concatenation of entropy with personalization str / addtl input) * the variable pers is directly handed in by the caller, so check its * contents whether it is appropriate */ if (pers && pers->buf && 0 < pers->len) { list_add_tail(&pers->list, &seedlist); pr_devel("DRBG: using personalization string\n"); } if (!reseed) { memset(drbg->V, 0, drbg_statelen(drbg)); memset(drbg->C, 0, drbg_statelen(drbg)); } ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state); out: memzero_explicit(entropy, entropylen * 2); return ret; } /* Free all substructures in a DRBG state without the DRBG state structure */ static inline void drbg_dealloc_state(struct drbg_state *drbg) { if (!drbg) return; kfree_sensitive(drbg->Vbuf); drbg->Vbuf = NULL; drbg->V = NULL; kfree_sensitive(drbg->Cbuf); drbg->Cbuf = NULL; drbg->C = NULL; kfree_sensitive(drbg->scratchpadbuf); drbg->scratchpadbuf = NULL; drbg->reseed_ctr = 0; drbg->d_ops = NULL; drbg->core = NULL; if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { kfree_sensitive(drbg->prev); drbg->prev = NULL; drbg->fips_primed = false; } } /* * Allocate all sub-structures for a DRBG state. * The DRBG state structure must already be allocated. */ static inline int drbg_alloc_state(struct drbg_state *drbg) { int ret = -ENOMEM; unsigned int sb_size = 0; switch (drbg->core->flags & DRBG_TYPE_MASK) { #ifdef CONFIG_CRYPTO_DRBG_HMAC case DRBG_HMAC: drbg->d_ops = &drbg_hmac_ops; break; #endif /* CONFIG_CRYPTO_DRBG_HMAC */ #ifdef CONFIG_CRYPTO_DRBG_HASH case DRBG_HASH: drbg->d_ops = &drbg_hash_ops; break; #endif /* CONFIG_CRYPTO_DRBG_HASH */ #ifdef CONFIG_CRYPTO_DRBG_CTR case DRBG_CTR: drbg->d_ops = &drbg_ctr_ops; break; #endif /* CONFIG_CRYPTO_DRBG_CTR */ default: ret = -EOPNOTSUPP; goto err; } ret = drbg->d_ops->crypto_init(drbg); if (ret < 0) goto err; drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL); if (!drbg->Vbuf) { ret = -ENOMEM; goto fini; } drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1); drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL); if (!drbg->Cbuf) { ret = -ENOMEM; goto fini; } drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1); /* scratchpad is only generated for CTR and Hash */ if (drbg->core->flags & DRBG_HMAC) sb_size = 0; else if (drbg->core->flags & DRBG_CTR) sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */ drbg_statelen(drbg) + /* df_data */ drbg_blocklen(drbg) + /* pad */ drbg_blocklen(drbg) + /* iv */ drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */ else sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg); if (0 < sb_size) { drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL); if (!drbg->scratchpadbuf) { ret = -ENOMEM; goto fini; } drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1); } if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), GFP_KERNEL); if (!drbg->prev) { ret = -ENOMEM; goto fini; } drbg->fips_primed = false; } return 0; fini: drbg->d_ops->crypto_fini(drbg); err: drbg_dealloc_state(drbg); return ret; } 
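/*
 * Worked example (illustrative only): for the ctr_aes256 core with
 * drbg_statelen() == 48 and drbg_blocklen() == 16, the CTR scratchpad
 * computed above is (48 + 16) + 48 + 16 + 16 + (48 + 16) = 208 bytes,
 * allocated with additional room for the backend's alignment requirement.
 */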
/************************************************************************* * DRBG interface functions *************************************************************************/ /* * DRBG generate function as required by SP800-90A - this function * generates random numbers * * @drbg DRBG state handle * @buf Buffer where to store the random numbers -- the buffer must already * be pre-allocated by caller * @buflen Length of output buffer - this value defines the number of random * bytes pulled from DRBG * @addtl Additional input that is mixed into state, may be NULL -- note * the entropy is pulled by the DRBG internally unconditionally * as defined in SP800-90A. The additional input is mixed into * the state in addition to the pulled entropy. * * return: 0 when all bytes are generated; < 0 in case of an error */ static int drbg_generate(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct drbg_string *addtl) { int len = 0; LIST_HEAD(addtllist); if (!drbg->core) { pr_devel("DRBG: not yet seeded\n"); return -EINVAL; } if (0 == buflen || !buf) { pr_devel("DRBG: no output buffer provided\n"); return -EINVAL; } if (addtl && NULL == addtl->buf && 0 < addtl->len) { pr_devel("DRBG: wrong format of additional information\n"); return -EINVAL; } /* 9.3.1 step 2 */ len = -EINVAL; if (buflen > (drbg_max_request_bytes(drbg))) { pr_devel("DRBG: requested random numbers too large %u\n", buflen); goto err; } /* 9.3.1 step 3 is implicit with the chosen DRBG */ /* 9.3.1 step 4 */ if (addtl && addtl->len > (drbg_max_addtl(drbg))) { pr_devel("DRBG: additional information string too long %zu\n", addtl->len); goto err; } /* 9.3.1 step 5 is implicit with the chosen DRBG */ /* * 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented * here. The spec is a bit convoluted here, we make it simpler. */ if (drbg->reseed_threshold < drbg->reseed_ctr) drbg->seeded = DRBG_SEED_STATE_UNSEEDED; if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) { pr_devel("DRBG: reseeding before generation (prediction " "resistance: %s, state %s)\n", str_true_false(drbg->pr), (drbg->seeded == DRBG_SEED_STATE_FULL ? "seeded" : "unseeded")); /* 9.3.1 steps 7.1 through 7.3 */ len = drbg_seed(drbg, addtl, true); if (len) goto err; /* 9.3.1 step 7.4 */ addtl = NULL; } else if (rng_is_initialized() && (drbg->seeded == DRBG_SEED_STATE_PARTIAL || drbg_nopr_reseed_interval_elapsed(drbg))) { len = drbg_seed_from_random(drbg); if (len) goto err; } if (addtl && 0 < addtl->len) list_add_tail(&addtl->list, &addtllist); /* 9.3.1 step 8 and 10 */ len = drbg->d_ops->generate(drbg, buf, buflen, &addtllist); /* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */ drbg->reseed_ctr++; if (0 >= len) goto err; /* * Section 11.3.3 requires to re-perform self tests after some * generated random numbers. The chosen value after which self * test is performed is arbitrary, but it should be reasonable. * However, we do not perform the self tests because of the following * reasons: it is mathematically impossible that the initial self tests * were successfully and the following are not. If the initial would * pass and the following would not, the kernel integrity is violated. * In this case, the entire kernel operation is questionable and it * is unlikely that the integrity violation only affects the * correct operation of the DRBG. * * Albeit the following code is commented out, it is provided in * case somebody has a need to implement the test of 11.3.3. 
*/ #if 0 if (drbg->reseed_ctr && !(drbg->reseed_ctr % 4096)) { int err = 0; pr_devel("DRBG: start to perform self test\n"); if (drbg->core->flags & DRBG_HMAC) err = alg_test("drbg_pr_hmac_sha512", "drbg_pr_hmac_sha512", 0, 0); else if (drbg->core->flags & DRBG_CTR) err = alg_test("drbg_pr_ctr_aes256", "drbg_pr_ctr_aes256", 0, 0); else err = alg_test("drbg_pr_sha256", "drbg_pr_sha256", 0, 0); if (err) { pr_err("DRBG: periodical self test failed\n"); /* * uninstantiate implies that from now on, only errors * are returned when reusing this DRBG cipher handle */ drbg_uninstantiate(drbg); return 0; } else { pr_devel("DRBG: self test successful\n"); } } #endif /* * All operations were successful, return 0 as mandated by * the kernel crypto API interface. */ len = 0; err: return len; } /* * Wrapper around drbg_generate which can pull arbitrary long strings * from the DRBG without hitting the maximum request limitation. * * Parameters: see drbg_generate * Return codes: see drbg_generate -- if one drbg_generate request fails, * the entire drbg_generate_long request fails */ static int drbg_generate_long(struct drbg_state *drbg, unsigned char *buf, unsigned int buflen, struct drbg_string *addtl) { unsigned int len = 0; unsigned int slice = 0; do { int err = 0; unsigned int chunk = 0; slice = ((buflen - len) / drbg_max_request_bytes(drbg)); chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len); mutex_lock(&drbg->drbg_mutex); err = drbg_generate(drbg, buf + len, chunk, addtl); mutex_unlock(&drbg->drbg_mutex); if (0 > err) return err; len += chunk; } while (slice > 0 && (len < buflen)); return 0; } static int drbg_prepare_hrng(struct drbg_state *drbg) { /* We do not need an HRNG in test mode. */ if (list_empty(&drbg->test_data.list)) return 0; drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0); if (IS_ERR(drbg->jent)) { const int err = PTR_ERR(drbg->jent); drbg->jent = NULL; if (fips_enabled) return err; pr_info("DRBG: Continuing without Jitter RNG\n"); } return 0; } /* * DRBG instantiation function as required by SP800-90A - this function * sets up the DRBG handle, performs the initial seeding and all sanity * checks required by SP800-90A * * @drbg memory of state -- if NULL, new memory is allocated * @pers Personalization string that is mixed into state, may be NULL -- note * the entropy is pulled by the DRBG internally unconditionally * as defined in SP800-90A. The additional input is mixed into * the state in addition to the pulled entropy. 
* @coreref reference to core * @pr prediction resistance enabled * * return * 0 on success * error value otherwise */ static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers, int coreref, bool pr) { int ret; bool reseed = true; pr_devel("DRBG: Initializing DRBG core %d with prediction resistance " "%s\n", coreref, str_enabled_disabled(pr)); mutex_lock(&drbg->drbg_mutex); /* 9.1 step 1 is implicit with the selected DRBG type */ /* * 9.1 step 2 is implicit as caller can select prediction resistance * and the flag is copied into drbg->flags -- * all DRBG types support prediction resistance */ /* 9.1 step 4 is implicit in drbg_sec_strength */ if (!drbg->core) { drbg->core = &drbg_cores[coreref]; drbg->pr = pr; drbg->seeded = DRBG_SEED_STATE_UNSEEDED; drbg->last_seed_time = 0; drbg->reseed_threshold = drbg_max_requests(drbg); ret = drbg_alloc_state(drbg); if (ret) goto unlock; ret = drbg_prepare_hrng(drbg); if (ret) goto free_everything; reseed = false; } ret = drbg_seed(drbg, pers, reseed); if (ret && !reseed) goto free_everything; mutex_unlock(&drbg->drbg_mutex); return ret; unlock: mutex_unlock(&drbg->drbg_mutex); return ret; free_everything: mutex_unlock(&drbg->drbg_mutex); drbg_uninstantiate(drbg); return ret; } /* * DRBG uninstantiate function as required by SP800-90A - this function * frees all buffers and the DRBG handle * * @drbg DRBG state handle * * return * 0 on success */ static int drbg_uninstantiate(struct drbg_state *drbg) { if (!IS_ERR_OR_NULL(drbg->jent)) crypto_free_rng(drbg->jent); drbg->jent = NULL; if (drbg->d_ops) drbg->d_ops->crypto_fini(drbg); drbg_dealloc_state(drbg); /* no scrubbing of test_data -- this shall survive an uninstantiate */ return 0; } /* * Helper function for setting the test data in the DRBG * * @drbg DRBG state handle * @data test data * @len test data length */ static void drbg_kcapi_set_entropy(struct crypto_rng *tfm, const u8 *data, unsigned int len) { struct drbg_state *drbg = crypto_rng_ctx(tfm); mutex_lock(&drbg->drbg_mutex); drbg_string_fill(&drbg->test_data, data, len); mutex_unlock(&drbg->drbg_mutex); } /*************************************************************** * Kernel crypto API cipher invocations requested by DRBG ***************************************************************/ #if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC) struct sdesc { struct shash_desc shash; char ctx[]; }; static int drbg_init_hash_kernel(struct drbg_state *drbg) { struct sdesc *sdesc; struct crypto_shash *tfm; tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0); if (IS_ERR(tfm)) { pr_info("DRBG: could not allocate digest TFM handle: %s\n", drbg->core->backend_cra_name); return PTR_ERR(tfm); } BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm)); sdesc = kzalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm), GFP_KERNEL); if (!sdesc) { crypto_free_shash(tfm); return -ENOMEM; } sdesc->shash.tfm = tfm; drbg->priv_data = sdesc; return 0; } static int drbg_fini_hash_kernel(struct drbg_state *drbg) { struct sdesc *sdesc = drbg->priv_data; if (sdesc) { crypto_free_shash(sdesc->shash.tfm); kfree_sensitive(sdesc); } drbg->priv_data = NULL; return 0; } static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, const unsigned char *key) { struct sdesc *sdesc = drbg->priv_data; crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg)); } static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, const struct list_head *in) { struct sdesc *sdesc = drbg->priv_data; struct drbg_string 
*input = NULL; crypto_shash_init(&sdesc->shash); list_for_each_entry(input, in, list) crypto_shash_update(&sdesc->shash, input->buf, input->len); return crypto_shash_final(&sdesc->shash, outval); } #endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */ #ifdef CONFIG_CRYPTO_DRBG_CTR static int drbg_fini_sym_kernel(struct drbg_state *drbg) { struct crypto_cipher *tfm = (struct crypto_cipher *)drbg->priv_data; if (tfm) crypto_free_cipher(tfm); drbg->priv_data = NULL; if (drbg->ctr_handle) crypto_free_skcipher(drbg->ctr_handle); drbg->ctr_handle = NULL; if (drbg->ctr_req) skcipher_request_free(drbg->ctr_req); drbg->ctr_req = NULL; kfree(drbg->outscratchpadbuf); drbg->outscratchpadbuf = NULL; return 0; } static int drbg_init_sym_kernel(struct drbg_state *drbg) { struct crypto_cipher *tfm; struct crypto_skcipher *sk_tfm; struct skcipher_request *req; unsigned int alignmask; char ctr_name[CRYPTO_MAX_ALG_NAME]; tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0); if (IS_ERR(tfm)) { pr_info("DRBG: could not allocate cipher TFM handle: %s\n", drbg->core->backend_cra_name); return PTR_ERR(tfm); } BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm)); drbg->priv_data = tfm; if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) { drbg_fini_sym_kernel(drbg); return -EINVAL; } sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0); if (IS_ERR(sk_tfm)) { pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n", ctr_name); drbg_fini_sym_kernel(drbg); return PTR_ERR(sk_tfm); } drbg->ctr_handle = sk_tfm; crypto_init_wait(&drbg->ctr_wait); req = skcipher_request_alloc(sk_tfm, GFP_KERNEL); if (!req) { pr_info("DRBG: could not allocate request queue\n"); drbg_fini_sym_kernel(drbg); return -ENOMEM; } drbg->ctr_req = req; skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &drbg->ctr_wait); alignmask = crypto_skcipher_alignmask(sk_tfm); drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask, GFP_KERNEL); if (!drbg->outscratchpadbuf) { drbg_fini_sym_kernel(drbg); return -ENOMEM; } drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf, alignmask + 1); sg_init_table(&drbg->sg_in, 1); sg_init_one(&drbg->sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN); return alignmask; } static void drbg_kcapi_symsetkey(struct drbg_state *drbg, const unsigned char *key) { struct crypto_cipher *tfm = drbg->priv_data; crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg))); } static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, const struct drbg_string *in) { struct crypto_cipher *tfm = drbg->priv_data; /* there is only component in *in */ BUG_ON(in->len < drbg_blocklen(drbg)); crypto_cipher_encrypt_one(tfm, outval, in->buf); return 0; } static int drbg_kcapi_sym_ctr(struct drbg_state *drbg, u8 *inbuf, u32 inlen, u8 *outbuf, u32 outlen) { struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out; u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN); int ret; if (inbuf) { /* Use caller-provided input buffer */ sg_set_buf(sg_in, inbuf, inlen); } else { /* Use scratchpad for in-place operation */ inlen = scratchpad_use; memset(drbg->outscratchpad, 0, scratchpad_use); sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use); } while (outlen) { u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN); /* Output buffer may not be valid for SGL, use scratchpad */ skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out, cryptlen, drbg->V); ret = 
crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req), &drbg->ctr_wait); if (ret) goto out; crypto_init_wait(&drbg->ctr_wait); memcpy(outbuf, drbg->outscratchpad, cryptlen); memzero_explicit(drbg->outscratchpad, cryptlen); outlen -= cryptlen; outbuf += cryptlen; } ret = 0; out: return ret; } #endif /* CONFIG_CRYPTO_DRBG_CTR */ /*************************************************************** * Kernel crypto API interface to register DRBG ***************************************************************/ /* * Look up the DRBG flags by given kernel crypto API cra_name * The code uses the drbg_cores definition to do this * * @cra_name kernel crypto API cra_name * @coreref reference to integer which is filled with the pointer to * the applicable core * @pr reference for setting prediction resistance * * return: flags */ static inline void drbg_convert_tfm_core(const char *cra_driver_name, int *coreref, bool *pr) { int i = 0; size_t start = 0; int len = 0; *pr = true; /* disassemble the names */ if (!memcmp(cra_driver_name, "drbg_nopr_", 10)) { start = 10; *pr = false; } else if (!memcmp(cra_driver_name, "drbg_pr_", 8)) { start = 8; } else { return; } /* remove the first part */ len = strlen(cra_driver_name) - start; for (i = 0; ARRAY_SIZE(drbg_cores) > i; i++) { if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name, len)) { *coreref = i; return; } } } static int drbg_kcapi_init(struct crypto_tfm *tfm) { struct drbg_state *drbg = crypto_tfm_ctx(tfm); mutex_init(&drbg->drbg_mutex); return 0; } static void drbg_kcapi_cleanup(struct crypto_tfm *tfm) { drbg_uninstantiate(crypto_tfm_ctx(tfm)); } /* * Generate random numbers invoked by the kernel crypto API: * The API of the kernel crypto API is extended as follows: * * src is additional input supplied to the RNG. * slen is the length of src. * dst is the output buffer where random data is to be stored. * dlen is the length of dst. */ static int drbg_kcapi_random(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { struct drbg_state *drbg = crypto_rng_ctx(tfm); struct drbg_string *addtl = NULL; struct drbg_string string; if (slen) { /* linked list variable is now local to allow modification */ drbg_string_fill(&string, src, slen); addtl = &string; } return drbg_generate_long(drbg, dst, dlen, addtl); } /* * Seed the DRBG invoked by the kernel crypto API */ static int drbg_kcapi_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { struct drbg_state *drbg = crypto_rng_ctx(tfm); struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm); bool pr = false; struct drbg_string string; struct drbg_string *seed_string = NULL; int coreref = 0; drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref, &pr); if (0 < slen) { drbg_string_fill(&string, seed, slen); seed_string = &string; } return drbg_instantiate(drbg, seed_string, coreref, pr); } /*************************************************************** * Kernel module: code to load the module ***************************************************************/ /* * Tests as defined in 11.3.2 in addition to the cipher tests: testing * of the error handling. * * Note: testing of failing seed source as defined in 11.3.2 is not applicable * as seed source of get_random_bytes does not fail. * * Note 2: There is no sensible way of testing the reseed counter * enforcement, so skip it. 
*/ static inline int __init drbg_healthcheck_sanity(void) { int len = 0; #define OUTBUFLEN 16 unsigned char buf[OUTBUFLEN]; struct drbg_state *drbg = NULL; int ret; int rc = -EFAULT; bool pr = false; int coreref = 0; struct drbg_string addtl; size_t max_addtllen, max_request_bytes; /* only perform test in FIPS mode */ if (!fips_enabled) return 0; #ifdef CONFIG_CRYPTO_DRBG_CTR drbg_convert_tfm_core("drbg_nopr_ctr_aes256", &coreref, &pr); #endif #ifdef CONFIG_CRYPTO_DRBG_HASH drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr); #endif #ifdef CONFIG_CRYPTO_DRBG_HMAC drbg_convert_tfm_core("drbg_nopr_hmac_sha512", &coreref, &pr); #endif drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL); if (!drbg) return -ENOMEM; mutex_init(&drbg->drbg_mutex); drbg->core = &drbg_cores[coreref]; drbg->reseed_threshold = drbg_max_requests(drbg); /* * if the following tests fail, it is likely that there is a buffer * overflow as buf is much smaller than the requested or provided * string lengths -- in case the error handling does not succeed * we may get an OOPS. And we want to get an OOPS as this is a * grave bug. */ max_addtllen = drbg_max_addtl(drbg); max_request_bytes = drbg_max_request_bytes(drbg); drbg_string_fill(&addtl, buf, max_addtllen + 1); /* overflow addtllen with additonal info string */ len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl); BUG_ON(0 < len); /* overflow max_bits */ len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL); BUG_ON(0 < len); /* overflow max addtllen with personalization string */ ret = drbg_seed(drbg, &addtl, false); BUG_ON(0 == ret); /* all tests passed */ rc = 0; pr_devel("DRBG: Sanity tests for failure code paths successfully " "completed\n"); kfree(drbg); return rc; } static struct rng_alg drbg_algs[22]; /* * Fill the array drbg_algs used to register the different DRBGs * with the kernel crypto API. To fill the array, the information * from drbg_cores[] is used. */ static inline void __init drbg_fill_array(struct rng_alg *alg, const struct drbg_core *core, int pr) { int pos = 0; static int priority = 200; memcpy(alg->base.cra_name, "stdrng", 6); if (pr) { memcpy(alg->base.cra_driver_name, "drbg_pr_", 8); pos = 8; } else { memcpy(alg->base.cra_driver_name, "drbg_nopr_", 10); pos = 10; } memcpy(alg->base.cra_driver_name + pos, core->cra_name, strlen(core->cra_name)); alg->base.cra_priority = priority; priority++; /* * If FIPS mode enabled, the selected DRBG shall have the * highest cra_priority over other stdrng instances to ensure * it is selected. */ if (fips_enabled) alg->base.cra_priority += 200; alg->base.cra_ctxsize = sizeof(struct drbg_state); alg->base.cra_module = THIS_MODULE; alg->base.cra_init = drbg_kcapi_init; alg->base.cra_exit = drbg_kcapi_cleanup; alg->generate = drbg_kcapi_random; alg->seed = drbg_kcapi_seed; alg->set_ent = drbg_kcapi_set_entropy; alg->seedsize = 0; } static int __init drbg_init(void) { unsigned int i = 0; /* pointer to drbg_algs */ unsigned int j = 0; /* pointer to drbg_cores */ int ret; ret = drbg_healthcheck_sanity(); if (ret) return ret; if (ARRAY_SIZE(drbg_cores) * 2 > ARRAY_SIZE(drbg_algs)) { pr_info("DRBG: Cannot register all DRBG types" "(slots needed: %zu, slots available: %zu)\n", ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs)); return -EFAULT; } /* * each DRBG definition can be used with PR and without PR, thus * we instantiate each DRBG in drbg_cores[] twice. 
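 * For example, the hmac_sha256 entry of drbg_cores[] is registered both as
 * drbg_pr_hmac_sha256 and as drbg_nopr_hmac_sha256 (matching the
 * MODULE_ALIAS_CRYPTO entries above).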
 *
 * As the order of placing them into the drbg_algs array matters
 * (the later DRBGs receive a higher cra_priority) we register the
 * prediction resistance DRBGs first, as they should not be too
 * interesting.
 */
	for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
		drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1);
	for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
		drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0);
	return crypto_register_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}

static void __exit drbg_exit(void)
{
	crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}

subsys_initcall(drbg_init);
module_exit(drbg_exit);

#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
#endif
#ifndef CRYPTO_DRBG_HMAC_STRING
#define CRYPTO_DRBG_HMAC_STRING ""
#endif
#ifndef CRYPTO_DRBG_CTR_STRING
#define CRYPTO_DRBG_CTR_STRING ""
#endif
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
		   "using following cores: "
		   CRYPTO_DRBG_HASH_STRING
		   CRYPTO_DRBG_HMAC_STRING
		   CRYPTO_DRBG_CTR_STRING);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");
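For orientation, a minimal in-kernel consumer of the rng_alg callbacks registered above could look like the following sketch. It is illustrative only and not part of drbg.c; the function name and buffer handling are hypothetical, but the calls show how crypto_rng_reset() reaches drbg_kcapi_seed() and how crypto_rng_get_bytes() reaches drbg_kcapi_random().

/*
 * Usage sketch (assumed example, not part of drbg.c): exercise a DRBG
 * instance through the kernel crypto RNG API. Error handling abbreviated.
 */
#include <crypto/rng.h>
#include <linux/err.h>

static int example_drbg_generate(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	/* driver name resolved by drbg_convert_tfm_core() above */
	rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* NULL seed: the DRBG gathers its own entropy during instantiation */
	ret = crypto_rng_reset(rng, NULL, 0);
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}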
// SPDX-License-Identifier: GPL-2.0
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/utime.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/unistd.h>
#include <linux/filelock.h>

static bool nsec_valid(long nsec)
{
	if (nsec == UTIME_OMIT || nsec == UTIME_NOW)
		return true;

	return nsec >= 0 && nsec <= 999999999;
}

int vfs_utimes(const struct path *path, struct timespec64 *times)
{
	int error;
	struct iattr newattrs;
	struct inode *inode = path->dentry->d_inode;
	struct inode *delegated_inode = NULL;

	if (times) {
		if (!nsec_valid(times[0].tv_nsec) ||
		    !nsec_valid(times[1].tv_nsec))
			return -EINVAL;
		if (times[0].tv_nsec == UTIME_NOW &&
		    times[1].tv_nsec == UTIME_NOW)
			times = NULL;
	}

	error = mnt_want_write(path->mnt);
	if (error)
		goto out;

	newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME;
	if (times) {
		if (times[0].tv_nsec == UTIME_OMIT)
			newattrs.ia_valid &= ~ATTR_ATIME;
		else if (times[0].tv_nsec != UTIME_NOW) {
			newattrs.ia_atime = times[0];
			newattrs.ia_valid |= ATTR_ATIME_SET;
		}

		if (times[1].tv_nsec == UTIME_OMIT)
			newattrs.ia_valid &= ~ATTR_MTIME;
		else if (times[1].tv_nsec != UTIME_NOW) {
			newattrs.ia_mtime = times[1];
			newattrs.ia_valid |= ATTR_MTIME_SET;
		}
		/*
		 * Tell setattr_prepare(), that this is an explicit time
		 * update, even if neither ATTR_ATIME_SET nor ATTR_MTIME_SET
		 * were used.
*/ newattrs.ia_valid |= ATTR_TIMES_SET; } else { newattrs.ia_valid |= ATTR_TOUCH; } retry_deleg: inode_lock(inode); error = notify_change(mnt_idmap(path->mnt), path->dentry, &newattrs, &delegated_inode); inode_unlock(inode); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(path->mnt); out: return error; } static int do_utimes_path(int dfd, const char __user *filename, struct timespec64 *times, int flags) { struct path path; int lookup_flags = 0, error; if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) return -EINVAL; if (!(flags & AT_SYMLINK_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; if (flags & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) return error; error = vfs_utimes(&path, times); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } static int do_utimes_fd(int fd, struct timespec64 *times, int flags) { if (flags) return -EINVAL; CLASS(fd, f)(fd); if (fd_empty(f)) return -EBADF; return vfs_utimes(&fd_file(f)->f_path, times); } /* * do_utimes - change times on filename or file descriptor * @dfd: open file descriptor, -1 or AT_FDCWD * @filename: path name or NULL * @times: new times or NULL * @flags: zero or more flags (only AT_SYMLINK_NOFOLLOW for the moment) * * If filename is NULL and dfd refers to an open file, then operate on * the file. Otherwise look up filename, possibly using dfd as a * starting point. * * If times==NULL, set access and modification to current time, * must be owner or have write permission. * Else, update from *times, must be owner or super user. */ long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, int flags) { if (filename == NULL && dfd != AT_FDCWD) return do_utimes_fd(dfd, times, flags); return do_utimes_path(dfd, filename, times, flags); } SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename, struct __kernel_timespec __user *, utimes, int, flags) { struct timespec64 tstimes[2]; if (utimes) { if ((get_timespec64(&tstimes[0], &utimes[0]) || get_timespec64(&tstimes[1], &utimes[1]))) return -EFAULT; /* Nothing to do, we must not even check the path. */ if (tstimes[0].tv_nsec == UTIME_OMIT && tstimes[1].tv_nsec == UTIME_OMIT) return 0; } return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags); } #ifdef __ARCH_WANT_SYS_UTIME /* * futimesat(), utimes() and utime() are older versions of utimensat() * that are provided for compatibility with traditional C libraries. * On modern architectures, we always use libc wrappers around * utimensat() instead. */ static long do_futimesat(int dfd, const char __user *filename, struct __kernel_old_timeval __user *utimes) { struct __kernel_old_timeval times[2]; struct timespec64 tstimes[2]; if (utimes) { if (copy_from_user(&times, utimes, sizeof(times))) return -EFAULT; /* This test is needed to catch all invalid values. If we would test only in do_utimes we would miss those invalid values truncated by the multiplication with 1000. Note that we also catch UTIME_{NOW,OMIT} here which are only valid for utimensat. */ if (times[0].tv_usec >= 1000000 || times[0].tv_usec < 0 || times[1].tv_usec >= 1000000 || times[1].tv_usec < 0) return -EINVAL; tstimes[0].tv_sec = times[0].tv_sec; tstimes[0].tv_nsec = 1000 * times[0].tv_usec; tstimes[1].tv_sec = times[1].tv_sec; tstimes[1].tv_nsec = 1000 * times[1].tv_usec; } return do_utimes(dfd, filename, utimes ? 
tstimes : NULL, 0); } SYSCALL_DEFINE3(futimesat, int, dfd, const char __user *, filename, struct __kernel_old_timeval __user *, utimes) { return do_futimesat(dfd, filename, utimes); } SYSCALL_DEFINE2(utimes, char __user *, filename, struct __kernel_old_timeval __user *, utimes) { return do_futimesat(AT_FDCWD, filename, utimes); } SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times) { struct timespec64 tv[2]; if (times) { if (get_user(tv[0].tv_sec, &times->actime) || get_user(tv[1].tv_sec, &times->modtime)) return -EFAULT; tv[0].tv_nsec = 0; tv[1].tv_nsec = 0; } return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0); } #endif #ifdef CONFIG_COMPAT_32BIT_TIME /* * Not all architectures have sys_utime, so implement this in terms * of sys_utimes. */ #ifdef __ARCH_WANT_SYS_UTIME32 SYSCALL_DEFINE2(utime32, const char __user *, filename, struct old_utimbuf32 __user *, t) { struct timespec64 tv[2]; if (t) { if (get_user(tv[0].tv_sec, &t->actime) || get_user(tv[1].tv_sec, &t->modtime)) return -EFAULT; tv[0].tv_nsec = 0; tv[1].tv_nsec = 0; } return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0); } #endif SYSCALL_DEFINE4(utimensat_time32, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags) { struct timespec64 tv[2]; if (t) { if (get_old_timespec32(&tv[0], &t[0]) || get_old_timespec32(&tv[1], &t[1])) return -EFAULT; if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT) return 0; } return do_utimes(dfd, filename, t ? tv : NULL, flags); } #ifdef __ARCH_WANT_SYS_UTIME32 static long do_compat_futimesat(unsigned int dfd, const char __user *filename, struct old_timeval32 __user *t) { struct timespec64 tv[2]; if (t) { if (get_user(tv[0].tv_sec, &t[0].tv_sec) || get_user(tv[0].tv_nsec, &t[0].tv_usec) || get_user(tv[1].tv_sec, &t[1].tv_sec) || get_user(tv[1].tv_nsec, &t[1].tv_usec)) return -EFAULT; if (tv[0].tv_nsec >= 1000000 || tv[0].tv_nsec < 0 || tv[1].tv_nsec >= 1000000 || tv[1].tv_nsec < 0) return -EINVAL; tv[0].tv_nsec *= 1000; tv[1].tv_nsec *= 1000; } return do_utimes(dfd, filename, t ? tv : NULL, 0); } SYSCALL_DEFINE3(futimesat_time32, unsigned int, dfd, const char __user *, filename, struct old_timeval32 __user *, t) { return do_compat_futimesat(dfd, filename, t); } SYSCALL_DEFINE2(utimes_time32, const char __user *, filename, struct old_timeval32 __user *, t) { return do_compat_futimesat(AT_FDCWD, filename, t); } #endif #endif
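The syscalls implemented above are normally reached through the libc wrappers. A small userspace sketch (hypothetical file name, error handling abbreviated) shows how UTIME_NOW and UTIME_OMIT in tv_nsec select the code paths that vfs_utimes() distinguishes via nsec_valid():

/* Userspace usage sketch (assumed example, not kernel code). */
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <time.h>

int main(void)
{
	struct timespec ts[2] = {
		{ .tv_nsec = UTIME_NOW },	/* atime: set to current time */
		{ .tv_nsec = UTIME_OMIT },	/* mtime: leave unchanged */
	};

	if (utimensat(AT_FDCWD, "example.txt", ts, 0) != 0) {
		perror("utimensat");
		return 1;
	}
	return 0;
}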
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive. The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible. Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
* * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed, * or -ENOSPC if no free IDs could be found. If an error occurred, * @nextid is unchanged. */ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid, unsigned long max, gfp_t gfp) { struct radix_tree_iter iter; void __rcu **slot; unsigned int base = idr->idr_base; unsigned int id = *nextid; if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR))) idr->idr_rt.xa_flags |= IDR_RT_MARKER; id = (id < base) ? 0 : id - base; radix_tree_iter_init(&iter, id); slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base); if (IS_ERR(slot)) return PTR_ERR(slot); *nextid = iter.index + base; /* there is a memory barrier inside radix_tree_iter_replace() */ radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr); radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE); return 0; } EXPORT_SYMBOL_GPL(idr_alloc_u32); /** * idr_alloc() - Allocate an ID. * @idr: IDR handle. * @ptr: Pointer to be associated with the new ID. * @start: The minimum ID (inclusive). * @end: The maximum ID (exclusive). * @gfp: Memory allocation flags. * * Allocates an unused ID in the range specified by @start and @end. If * @end is <= 0, it is treated as one larger than %INT_MAX. This allows * callers to use @start + N as @end as long as N is within integer range. * * The caller should provide their own locking to ensure that two * concurrent modifications to the IDR are not possible. Read-only * accesses to the IDR may be done under the RCU read lock or may * exclude simultaneous writers. * * Return: The newly allocated ID, -ENOMEM if memory allocation failed, * or -ENOSPC if no free IDs could be found. */ int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) { u32 id = start; int ret; if (WARN_ON_ONCE(start < 0)) return -EINVAL; ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp); if (ret) return ret; return id; } EXPORT_SYMBOL_GPL(idr_alloc); /** * idr_alloc_cyclic() - Allocate an ID cyclically. * @idr: IDR handle. * @ptr: Pointer to be associated with the new ID. * @start: The minimum ID (inclusive). * @end: The maximum ID (exclusive). * @gfp: Memory allocation flags. * * Allocates an unused ID in the range specified by @start and @end. If * @end is <= 0, it is treated as one larger than %INT_MAX. This allows * callers to use @start + N as @end as long as N is within integer range. * The search for an unused ID will start at the last ID allocated and will * wrap around to @start if no free IDs are found before reaching @end. * * The caller should provide their own locking to ensure that two * concurrent modifications to the IDR are not possible. Read-only * accesses to the IDR may be done under the RCU read lock or may * exclude simultaneous writers. * * Return: The newly allocated ID, -ENOMEM if memory allocation failed, * or -ENOSPC if no free IDs could be found. */ int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp) { u32 id = idr->idr_next; int err, max = end > 0 ? end - 1 : INT_MAX; if ((int)id < start) id = start; err = idr_alloc_u32(idr, ptr, &id, max, gfp); if ((err == -ENOSPC) && (id > start)) { id = start; err = idr_alloc_u32(idr, ptr, &id, max, gfp); } if (err) return err; idr->idr_next = id + 1; return id; } EXPORT_SYMBOL(idr_alloc_cyclic); /** * idr_remove() - Remove an ID from the IDR. * @idr: IDR handle. * @id: Pointer ID. * * Removes this ID from the IDR. If the ID was not previously in the IDR, * this function returns %NULL. 
* * Since this function modifies the IDR, the caller should provide their * own locking to ensure that concurrent modification of the same IDR is * not possible. * * Return: The pointer formerly associated with this ID. */ void *idr_remove(struct idr *idr, unsigned long id) { return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL); } EXPORT_SYMBOL_GPL(idr_remove); /** * idr_find() - Return pointer for given ID. * @idr: IDR handle. * @id: Pointer ID. * * Looks up the pointer associated with this ID. A %NULL pointer may * indicate that @id is not allocated or that the %NULL pointer was * associated with this ID. * * This function can be called under rcu_read_lock(), given that the leaf * pointers lifetimes are correctly managed. * * Return: The pointer associated with this ID. */ void *idr_find(const struct idr *idr, unsigned long id) { return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base); } EXPORT_SYMBOL_GPL(idr_find); /** * idr_for_each() - Iterate through all stored pointers. * @idr: IDR handle. * @fn: Function to be called for each pointer. * @data: Data passed to callback function. * * The callback function will be called for each entry in @idr, passing * the ID, the entry and @data. * * If @fn returns anything other than %0, the iteration stops and that * value is returned from this function. * * idr_for_each() can be called concurrently with idr_alloc() and * idr_remove() if protected by RCU. Newly added entries may not be * seen and deleted entries may be seen, but adding and removing entries * will not cause other entries to be skipped, nor spurious ones to be seen. */ int idr_for_each(const struct idr *idr, int (*fn)(int id, void *p, void *data), void *data) { struct radix_tree_iter iter; void __rcu **slot; int base = idr->idr_base; radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) { int ret; unsigned long id = iter.index + base; if (WARN_ON_ONCE(id > INT_MAX)) break; ret = fn(id, rcu_dereference_raw(*slot), data); if (ret) return ret; } return 0; } EXPORT_SYMBOL(idr_for_each); /** * idr_get_next_ul() - Find next populated entry. * @idr: IDR handle. * @nextid: Pointer to an ID. * * Returns the next populated entry in the tree with an ID greater than * or equal to the value pointed to by @nextid. On exit, @nextid is updated * to the ID of the found value. To use in a loop, the value pointed to by * nextid must be incremented by the user. */ void *idr_get_next_ul(struct idr *idr, unsigned long *nextid) { struct radix_tree_iter iter; void __rcu **slot; void *entry = NULL; unsigned long base = idr->idr_base; unsigned long id = *nextid; id = (id < base) ? 0 : id - base; radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) { entry = rcu_dereference_raw(*slot); if (!entry) continue; if (!xa_is_internal(entry)) break; if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry)) break; slot = radix_tree_iter_retry(&iter); } if (!slot) return NULL; *nextid = iter.index + base; return entry; } EXPORT_SYMBOL(idr_get_next_ul); /** * idr_get_next() - Find next populated entry. * @idr: IDR handle. * @nextid: Pointer to an ID. * * Returns the next populated entry in the tree with an ID greater than * or equal to the value pointed to by @nextid. On exit, @nextid is updated * to the ID of the found value. To use in a loop, the value pointed to by * nextid must be incremented by the user. 
*/ void *idr_get_next(struct idr *idr, int *nextid) { unsigned long id = *nextid; void *entry = idr_get_next_ul(idr, &id); if (WARN_ON_ONCE(id > INT_MAX)) return NULL; *nextid = id; return entry; } EXPORT_SYMBOL(idr_get_next); /** * idr_replace() - replace pointer for given ID. * @idr: IDR handle. * @ptr: New pointer to associate with the ID. * @id: ID to change. * * Replace the pointer registered with an ID and return the old value. * This function can be called under the RCU read lock concurrently with * idr_alloc() and idr_remove() (as long as the ID being removed is not * the one being replaced!). * * Returns: the old value on success. %-ENOENT indicates that @id was not * found. %-EINVAL indicates that @ptr was not valid. */ void *idr_replace(struct idr *idr, void *ptr, unsigned long id) { struct radix_tree_node *node; void __rcu **slot = NULL; void *entry; id -= idr->idr_base; entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot); if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) return ERR_PTR(-ENOENT); __radix_tree_replace(&idr->idr_rt, node, slot, ptr); return entry; } EXPORT_SYMBOL(idr_replace); /** * DOC: IDA description * * The IDA is an ID allocator which does not provide the ability to * associate an ID with a pointer. As such, it only needs to store one * bit per ID, and so is more space efficient than an IDR. To use an IDA, * define it using DEFINE_IDA() (or embed a &struct ida in a data structure, * then initialise it using ida_init()). To allocate a new ID, call * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range(). * To free an ID, call ida_free(). * * ida_destroy() can be used to dispose of an IDA without needing to * free the individual IDs in it. You can use ida_is_empty() to find * out whether the IDA has any IDs currently allocated. * * The IDA handles its own locking. It is safe to call any of the IDA * functions without synchronisation in your code. * * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward * limitation, it should be quite straightforward to raise the maximum. */ /* * Developer's notes: * * The IDA uses the functionality provided by the XArray to store bitmaps in * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap * have been set. * * I considered telling the XArray that each slot is an order-10 node * and indexing by bit number, but the XArray can't allow a single multi-index * entry in the head, which would significantly increase memory consumption * for the IDA. So instead we divide the index by the number of bits in the * leaf bitmap before doing a radix tree lookup. * * As an optimisation, if there are only a few low bits set in any given * leaf, instead of allocating a 128-byte bitmap, we store the bits * as a value entry. Value entries never have the XA_FREE_MARK cleared * because we can always convert them into a bitmap entry. * * It would be possible to optimise further; once we've run out of a * single 128-byte bitmap, we currently switch to a 576-byte node, put * the 128-byte bitmap in the first entry and then start allocating extra * 128-byte entries. We could instead use the 512 bytes of the node's * data as a bitmap before moving to that scheme. I do not believe this * is a worthwhile optimisation; Rasmus Villemoes surveyed the current * users of the IDA and almost none of them use more than 1024 entries. * Those that do use more than the 8192 IDs that the 512 bytes would * provide. * * The IDA always uses a lock to alloc/free. 
If we add a 'test_bit' * equivalent, it will still need locking. Going to RCU lookup would require * using RCU to free bitmaps, and that's not trivial without embedding an * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte * bitmap, which is excessive. */ /** * ida_alloc_range() - Allocate an unused ID. * @ida: IDA handle. * @min: Lowest ID to allocate. * @max: Highest ID to allocate. * @gfp: Memory allocation flags. * * Allocate an ID between @min and @max, inclusive. The allocated ID will * not exceed %INT_MAX, even if @max is larger. * * Context: Any context. It is safe to call this function without * locking in your code. * Return: The allocated ID, or %-ENOMEM if memory could not be allocated, * or %-ENOSPC if there are no free IDs. */ int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max, gfp_t gfp) { XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS); unsigned bit = min % IDA_BITMAP_BITS; unsigned long flags; struct ida_bitmap *bitmap, *alloc = NULL; if ((int)min < 0) return -ENOSPC; if ((int)max < 0) max = INT_MAX; retry: xas_lock_irqsave(&xas, flags); next: bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK); if (xas.xa_index > min / IDA_BITMAP_BITS) bit = 0; if (xas.xa_index * IDA_BITMAP_BITS + bit > max) goto nospc; if (xa_is_value(bitmap)) { unsigned long tmp = xa_to_value(bitmap); if (bit < BITS_PER_XA_VALUE) { bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit); if (xas.xa_index * IDA_BITMAP_BITS + bit > max) goto nospc; if (bit < BITS_PER_XA_VALUE) { tmp |= 1UL << bit; xas_store(&xas, xa_mk_value(tmp)); goto out; } } bitmap = alloc; if (!bitmap) bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT); if (!bitmap) goto alloc; bitmap->bitmap[0] = tmp; xas_store(&xas, bitmap); if (xas_error(&xas)) { bitmap->bitmap[0] = 0; goto out; } } if (bitmap) { bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit); if (xas.xa_index * IDA_BITMAP_BITS + bit > max) goto nospc; if (bit == IDA_BITMAP_BITS) goto next; __set_bit(bit, bitmap->bitmap); if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS)) xas_clear_mark(&xas, XA_FREE_MARK); } else { if (bit < BITS_PER_XA_VALUE) { bitmap = xa_mk_value(1UL << bit); } else { bitmap = alloc; if (!bitmap) bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT); if (!bitmap) goto alloc; __set_bit(bit, bitmap->bitmap); } xas_store(&xas, bitmap); } out: xas_unlock_irqrestore(&xas, flags); if (xas_nomem(&xas, gfp)) { xas.xa_index = min / IDA_BITMAP_BITS; bit = min % IDA_BITMAP_BITS; goto retry; } if (bitmap != alloc) kfree(alloc); if (xas_error(&xas)) return xas_error(&xas); return xas.xa_index * IDA_BITMAP_BITS + bit; alloc: xas_unlock_irqrestore(&xas, flags); alloc = kzalloc(sizeof(*bitmap), gfp); if (!alloc) return -ENOMEM; xas_set(&xas, min / IDA_BITMAP_BITS); bit = min % IDA_BITMAP_BITS; goto retry; nospc: xas_unlock_irqrestore(&xas, flags); kfree(alloc); return -ENOSPC; } EXPORT_SYMBOL(ida_alloc_range); /** * ida_free() - Release an allocated ID. * @ida: IDA handle. * @id: Previously allocated ID. * * Context: Any context. It is safe to call this function without * locking in your code. 
*/ void ida_free(struct ida *ida, unsigned int id) { XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS); unsigned bit = id % IDA_BITMAP_BITS; struct ida_bitmap *bitmap; unsigned long flags; if ((int)id < 0) return; xas_lock_irqsave(&xas, flags); bitmap = xas_load(&xas); if (xa_is_value(bitmap)) { unsigned long v = xa_to_value(bitmap); if (bit >= BITS_PER_XA_VALUE) goto err; if (!(v & (1UL << bit))) goto err; v &= ~(1UL << bit); if (!v) goto delete; xas_store(&xas, xa_mk_value(v)); } else { if (!bitmap || !test_bit(bit, bitmap->bitmap)) goto err; __clear_bit(bit, bitmap->bitmap); xas_set_mark(&xas, XA_FREE_MARK); if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) { kfree(bitmap); delete: xas_store(&xas, NULL); } } xas_unlock_irqrestore(&xas, flags); return; err: xas_unlock_irqrestore(&xas, flags); WARN(1, "ida_free called for id=%d which is not allocated.\n", id); } EXPORT_SYMBOL(ida_free); /** * ida_destroy() - Free all IDs. * @ida: IDA handle. * * Calling this function frees all IDs and releases all resources used * by an IDA. When this call returns, the IDA is empty and can be reused * or freed. If the IDA is already empty, there is no need to call this * function. * * Context: Any context. It is safe to call this function without * locking in your code. */ void ida_destroy(struct ida *ida) { XA_STATE(xas, &ida->xa, 0); struct ida_bitmap *bitmap; unsigned long flags; xas_lock_irqsave(&xas, flags); xas_for_each(&xas, bitmap, ULONG_MAX) { if (!xa_is_value(bitmap)) kfree(bitmap); xas_store(&xas, NULL); } xas_unlock_irqrestore(&xas, flags); } EXPORT_SYMBOL(ida_destroy); #ifndef __KERNEL__ extern void xa_dump_index(unsigned long index, unsigned int shift); #define IDA_CHUNK_SHIFT ilog2(IDA_BITMAP_BITS) static void ida_dump_entry(void *entry, unsigned long index) { unsigned long i; if (!entry) return; if (xa_is_node(entry)) { struct xa_node *node = xa_to_node(entry); unsigned int shift = node->shift + IDA_CHUNK_SHIFT + XA_CHUNK_SHIFT; xa_dump_index(index * IDA_BITMAP_BITS, shift); xa_dump_node(node); for (i = 0; i < XA_CHUNK_SIZE; i++) ida_dump_entry(node->slots[i], index | (i << node->shift)); } else if (xa_is_value(entry)) { xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG)); pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry); } else { struct ida_bitmap *bitmap = entry; xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT); pr_cont("bitmap: %p data", bitmap); for (i = 0; i < IDA_BITMAP_LONGS; i++) pr_cont(" %lx", bitmap->bitmap[i]); pr_cont("\n"); } } static void ida_dump(struct ida *ida) { struct xarray *xa = &ida->xa; pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head, xa->xa_flags >> ROOT_TAG_SHIFT); ida_dump_entry(xa->xa_head, 0); } #endif
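A brief usage sketch of the two allocators implemented above (names such as example_idr are hypothetical): an IDR caller provides its own locking and may wrap a GFP_NOWAIT allocation in idr_preload()/idr_preload_end(), while the IDA handles locking internally as described in the DOC block.

/* Usage sketch (assumed example, not part of idr.c). */
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_idr_lock);
static DEFINE_IDA(example_ida);

static int example_register(void *object)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&example_idr_lock);
	/* end == 0 is treated as one larger than INT_MAX, i.e. no upper bound */
	id = idr_alloc(&example_idr, object, 1, 0, GFP_NOWAIT);
	spin_unlock(&example_idr_lock);
	idr_preload_end();

	return id;	/* >= 1 on success, -ENOMEM or -ENOSPC on failure */
}

static int example_get_instance_number(void)
{
	/* IDs only, no pointer: ida_alloc() needs no caller locking */
	return ida_alloc(&example_ida, GFP_KERNEL);
}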
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * Description: Data Center Bridging netlink interface
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>

/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet. Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 * framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 * can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 * control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
*/ /**************** DCB attribute policies *************************************/ /* DCB netlink attributes policy */ static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = { [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1}, [DCB_ATTR_STATE] = {.type = NLA_U8}, [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED}, [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED}, [DCB_ATTR_SET_ALL] = {.type = NLA_U8}, [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG}, [DCB_ATTR_CAP] = {.type = NLA_NESTED}, [DCB_ATTR_PFC_STATE] = {.type = NLA_U8}, [DCB_ATTR_BCN] = {.type = NLA_NESTED}, [DCB_ATTR_APP] = {.type = NLA_NESTED}, [DCB_ATTR_IEEE] = {.type = NLA_NESTED}, [DCB_ATTR_DCBX] = {.type = NLA_U8}, [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED}, }; /* DCB priority flow control to User Priority nested attributes */ static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = { [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG}, }; /* DCB priority grouping nested attributes */ static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = { [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED}, [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED}, [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8}, [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG}, }; /* DCB traffic class nested attributes. */ static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = { [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8}, [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG}, }; /* DCB capabilities nested attributes. */ static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = { [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_CAP_ATTR_PG] = {.type = NLA_U8}, [DCB_CAP_ATTR_PFC] = {.type = NLA_U8}, [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8}, [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8}, [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8}, [DCB_CAP_ATTR_GSP] = {.type = NLA_U8}, [DCB_CAP_ATTR_BCN] = {.type = NLA_U8}, [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8}, }; /* DCB capabilities nested attributes. */ static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = { [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8}, [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8}, }; /* DCB BCN nested attributes. 
*/ static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = { [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG}, [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32}, [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32}, [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32}, [DCB_BCN_ATTR_BETA] = {.type = NLA_U32}, [DCB_BCN_ATTR_GD] = {.type = NLA_U32}, [DCB_BCN_ATTR_GI] = {.type = NLA_U32}, [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32}, [DCB_BCN_ATTR_TD] = {.type = NLA_U32}, [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32}, [DCB_BCN_ATTR_W] = {.type = NLA_U32}, [DCB_BCN_ATTR_RD] = {.type = NLA_U32}, [DCB_BCN_ATTR_RU] = {.type = NLA_U32}, [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32}, [DCB_BCN_ATTR_RI] = {.type = NLA_U32}, [DCB_BCN_ATTR_C] = {.type = NLA_U32}, [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG}, }; /* DCB APP nested attributes. */ static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = { [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8}, [DCB_APP_ATTR_ID] = {.type = NLA_U16}, [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8}, }; /* IEEE 802.1Qaz nested attributes. */ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = { [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)}, [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)}, [DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)}, [DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)}, [DCB_ATTR_DCB_BUFFER] = {.len = sizeof(struct dcbnl_buffer)}, [DCB_ATTR_DCB_APP_TRUST_TABLE] = {.type = NLA_NESTED}, }; /* DCB number of traffic classes nested attributes. 
*/ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = { [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG}, [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8}, [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8}, }; static LIST_HEAD(dcb_app_list); static LIST_HEAD(dcb_rewr_list); static DEFINE_SPINLOCK(dcb_lock); static enum ieee_attrs_app dcbnl_app_attr_type_get(u8 selector) { switch (selector) { case IEEE_8021QAZ_APP_SEL_ETHERTYPE: case IEEE_8021QAZ_APP_SEL_STREAM: case IEEE_8021QAZ_APP_SEL_DGRAM: case IEEE_8021QAZ_APP_SEL_ANY: case IEEE_8021QAZ_APP_SEL_DSCP: return DCB_ATTR_IEEE_APP; case DCB_APP_SEL_PCP: return DCB_ATTR_DCB_APP; default: return DCB_ATTR_IEEE_APP_UNSPEC; } } static bool dcbnl_app_attr_type_validate(enum ieee_attrs_app type) { switch (type) { case DCB_ATTR_IEEE_APP: case DCB_ATTR_DCB_APP: return true; default: return false; } } static bool dcbnl_app_selector_validate(enum ieee_attrs_app type, u8 selector) { return dcbnl_app_attr_type_get(selector) == type; } static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq, u32 flags, struct nlmsghdr **nlhp) { struct sk_buff *skb; struct dcbmsg *dcb; struct nlmsghdr *nlh; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return NULL; nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags); BUG_ON(!nlh); dcb = nlmsg_data(nlh); dcb->dcb_family = AF_UNSPEC; dcb->cmd = cmd; dcb->dcb_pad = 0; if (nlhp) *nlhp = nlh; return skb; } static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */ if (!netdev->dcbnl_ops->getstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->getstate(netdev)); } static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_PFC_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->getpfccfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG); if (!nest) return -EMSGSIZE; if (data[DCB_PFC_UP_ATTR_ALL]) getall = 1; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (!getall && !data[i]) continue; netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } nla_nest_end(skb, nest); return 0; } static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 perm_addr[MAX_ADDR_LEN]; if (!netdev->dcbnl_ops->getpermhwaddr) return -EOPNOTSUPP; memset(perm_addr, 0, sizeof(perm_addr)); netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr); return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); } static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_CAP]) return -EINVAL; if (!netdev->dcbnl_ops->getcap) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP], dcbnl_cap_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP); if (!nest) return 
-EMSGSIZE; if (data[DCB_CAP_ATTR_ALL]) getall = 1; for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) { if (!getall && !data[i]) continue; if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) { ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } } nla_nest_end(skb, nest); return 0; } static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest; u8 value; int ret; int i; int getall = 0; if (!tb[DCB_ATTR_NUMTCS]) return -EINVAL; if (!netdev->dcbnl_ops->getnumtcs) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest, NULL); if (ret) return ret; nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS); if (!nest) return -EMSGSIZE; if (data[DCB_NUMTCS_ATTR_ALL]) getall = 1; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (!getall && !data[i]) continue; ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value); if (!ret) { ret = nla_put_u8(skb, i, value); if (ret) { nla_nest_cancel(skb, nest); return ret; } } else return -EINVAL; } nla_nest_end(skb, nest); return 0; } static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1]; int ret; u8 value; int i; if (!tb[DCB_ATTR_NUMTCS]) return -EINVAL; if (!netdev->dcbnl_ops->setnumtcs) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS], dcbnl_numtcs_nest, NULL); if (ret) return ret; for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value); if (ret) break; } return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret); } static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { if (!netdev->dcbnl_ops->getpfcstate) return -EOPNOTSUPP; return nla_put_u8(skb, DCB_ATTR_PFC_STATE, netdev->dcbnl_ops->getpfcstate(netdev)); } static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!tb[DCB_ATTR_PFC_STATE]) return -EINVAL; if (!netdev->dcbnl_ops->setpfcstate) return -EOPNOTSUPP; value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]); netdev->dcbnl_ops->setpfcstate(netdev, value); return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0); } static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *app_nest; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; u16 id; u8 up, idtype; int ret; if (!tb[DCB_ATTR_APP]) return -EINVAL; ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest, NULL); if (ret) return ret; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID])) return -EINVAL; /* either by eth type or by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) return -EINVAL; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); if (netdev->dcbnl_ops->getapp) { ret = netdev->dcbnl_ops->getapp(netdev, idtype, id); if (ret < 0) return ret; else up = ret; } else { struct dcb_app app = { .selector = idtype, .protocol = id, }; up = dcb_getapp(netdev, &app); } app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP); 
if (!app_nest) return -EMSGSIZE; ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype); if (ret) goto out_cancel; ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id); if (ret) goto out_cancel; ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up); if (ret) goto out_cancel; nla_nest_end(skb, app_nest); return 0; out_cancel: nla_nest_cancel(skb, app_nest); return ret; } static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { int ret; u16 id; u8 up, idtype; struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1]; if (!tb[DCB_ATTR_APP]) return -EINVAL; ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP], dcbnl_app_nest, NULL); if (ret) return ret; /* all must be non-null */ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) || (!app_tb[DCB_APP_ATTR_ID]) || (!app_tb[DCB_APP_ATTR_PRIORITY])) return -EINVAL; /* either by eth type or by socket number */ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]); if ((idtype != DCB_APP_IDTYPE_ETHTYPE) && (idtype != DCB_APP_IDTYPE_PORTNUM)) return -EINVAL; id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]); up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]); if (netdev->dcbnl_ops->setapp) { ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up); if (ret < 0) return ret; } else { struct dcb_app app; app.selector = idtype; app.protocol = id; app.priority = up; ret = dcb_setapp(netdev, &app); } ret = nla_put_u8(skb, DCB_ATTR_APP, ret); dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0); return ret; } static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, struct nlattr **tb, struct sk_buff *skb, int dir) { struct nlattr *pg_nest, *param_nest, *data; struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; u8 prio, pgid, tc_pct, up_map; int ret; int getall = 0; int i; if (!tb[DCB_ATTR_PG_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->getpgtccfgtx || !netdev->dcbnl_ops->getpgtccfgrx || !netdev->dcbnl_ops->getpgbwgcfgtx || !netdev->dcbnl_ops->getpgbwgcfgrx) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest, NULL); if (ret) return ret; pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG); if (!pg_nest) return -EMSGSIZE; if (pg_tb[DCB_PG_ATTR_TC_ALL]) getall = 1; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!getall && !pg_tb[i]) continue; if (pg_tb[DCB_PG_ATTR_TC_ALL]) data = pg_tb[DCB_PG_ATTR_TC_ALL]; else data = pg_tb[i]; ret = nla_parse_nested_deprecated(param_tb, DCB_TC_ATTR_PARAM_MAX, data, dcbnl_tc_param_nest, NULL); if (ret) goto err_pg; param_nest = nla_nest_start_noflag(skb, i); if (!param_nest) goto err_pg; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } else { /* Tx */ netdev->dcbnl_ops->getpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); } if (param_tb[DCB_TC_ATTR_PARAM_PGID] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); if (ret) goto err_param; } if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); if (ret) goto err_param; } if 
(param_tb[DCB_TC_ATTR_PARAM_BW_PCT] || param_tb[DCB_TC_ATTR_PARAM_ALL]) { ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); if (ret) goto err_param; } nla_nest_end(skb, param_nest); } if (pg_tb[DCB_PG_ATTR_BW_ID_ALL]) getall = 1; else getall = 0; for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!getall && !pg_tb[i]) continue; tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (dir) { /* Rx */ netdev->dcbnl_ops->getpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } else { /* Tx */ netdev->dcbnl_ops->getpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); } ret = nla_put_u8(skb, i, tc_pct); if (ret) goto err_pg; } nla_nest_end(skb, pg_nest); return 0; err_param: nla_nest_cancel(skb, param_nest); err_pg: nla_nest_cancel(skb, pg_nest); return -EMSGSIZE; } static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0); } static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1); } static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { u8 value; if (!tb[DCB_ATTR_STATE]) return -EINVAL; if (!netdev->dcbnl_ops->setstate) return -EOPNOTSUPP; value = nla_get_u8(tb[DCB_ATTR_STATE]); return nla_put_u8(skb, DCB_ATTR_STATE, netdev->dcbnl_ops->setstate(netdev, value)); } static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1]; int i; int ret; u8 value; if (!tb[DCB_ATTR_PFC_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->setpfccfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX, tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL); if (ret) return ret; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { if (data[i] == NULL) continue; value = nla_get_u8(data[i]); netdev->dcbnl_ops->setpfccfg(netdev, data[i]->nla_type - DCB_PFC_UP_ATTR_0, value); } return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0); } static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { int ret; if (!tb[DCB_ATTR_SET_ALL]) return -EINVAL; if (!netdev->dcbnl_ops->setall) return -EOPNOTSUPP; ret = nla_put_u8(skb, DCB_ATTR_SET_ALL, netdev->dcbnl_ops->setall(netdev)); dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0); return ret; } static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb, int dir) { struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1]; struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1]; int ret; int i; u8 pgid; u8 up_map; u8 prio; u8 tc_pct; if (!tb[DCB_ATTR_PG_CFG]) return -EINVAL; if (!netdev->dcbnl_ops->setpgtccfgtx || !netdev->dcbnl_ops->setpgtccfgrx || !netdev->dcbnl_ops->setpgbwgcfgtx || !netdev->dcbnl_ops->setpgbwgcfgrx) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest, NULL); if (ret) return ret; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { if (!pg_tb[i]) continue; ret = nla_parse_nested_deprecated(param_tb, DCB_TC_ATTR_PARAM_MAX, pg_tb[i], dcbnl_tc_param_nest, NULL); if (ret) return ret; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if 
(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]) prio = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]); if (param_tb[DCB_TC_ATTR_PARAM_PGID]) pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]); if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT]) tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]); if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]) up_map = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgtccfgrx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } else { /* Tx */ netdev->dcbnl_ops->setpgtccfgtx(netdev, i - DCB_PG_ATTR_TC_0, prio, pgid, tc_pct, up_map); } } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { if (!pg_tb[i]) continue; tc_pct = nla_get_u8(pg_tb[i]); /* dir: Tx = 0, Rx = 1 */ if (dir) { /* Rx */ netdev->dcbnl_ops->setpgbwgcfgrx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } else { /* Tx */ netdev->dcbnl_ops->setpgbwgcfgtx(netdev, i - DCB_PG_ATTR_BW_ID_0, tc_pct); } } return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0); } static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0); } static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1); } static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *bcn_nest; struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1]; u8 value_byte; u32 value_integer; int ret; bool getall = false; int i; if (!tb[DCB_ATTR_BCN]) return -EINVAL; if (!netdev->dcbnl_ops->getbcnrp || !netdev->dcbnl_ops->getbcncfg) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN); if (!bcn_nest) return -EMSGSIZE; if (bcn_tb[DCB_BCN_ATTR_ALL]) getall = true; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0, &value_byte); ret = nla_put_u8(skb, i, value_byte); if (ret) goto err_bcn; } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (!getall && !bcn_tb[i]) continue; netdev->dcbnl_ops->getbcncfg(netdev, i, &value_integer); ret = nla_put_u32(skb, i, value_integer); if (ret) goto err_bcn; } nla_nest_end(skb, bcn_nest); return 0; err_bcn: nla_nest_cancel(skb, bcn_nest); return ret; } static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { struct nlattr *data[DCB_BCN_ATTR_MAX + 1]; int i; int ret; u8 value_byte; u32 value_int; if (!tb[DCB_ATTR_BCN]) return -EINVAL; if (!netdev->dcbnl_ops->setbcncfg || !netdev->dcbnl_ops->setbcnrp) return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { if (data[i] == NULL) continue; value_byte = nla_get_u8(data[i]); netdev->dcbnl_ops->setbcnrp(netdev, data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte); } for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { if (data[i] == NULL) continue; value_int = nla_get_u32(data[i]); netdev->dcbnl_ops->setbcncfg(netdev, i, value_int); } return nla_put_u8(skb, DCB_ATTR_BCN, 0); } static int dcbnl_build_peer_app(struct net_device 
*netdev, struct sk_buff *skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;

	/* Retrieve the peer app configuration from the driver. If the driver
	 * handlers fail, exit without doing anything.
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc_array(app_count, sizeof(struct dcb_app),
				      GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/* Build the message; from here on the only possible failure
		 * is due to the skb size.
		 */
		err = -EMSGSIZE;

		app = nla_nest_start_noflag(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type,
				    sizeof(struct dcb_app), &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}

static int dcbnl_getapptrust(struct net_device *netdev, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	enum ieee_attrs_app type;
	struct nlattr *apptrust;
	int nselectors, err, i;
	u8 *selectors;

	selectors = kzalloc(IEEE_8021QAZ_APP_SEL_MAX + 1, GFP_KERNEL);
	if (!selectors)
		return -ENOMEM;

	err = ops->dcbnl_getapptrust(netdev, selectors, &nselectors);
	if (err) {
		err = 0;
		goto out;
	}

	apptrust = nla_nest_start(skb, DCB_ATTR_DCB_APP_TRUST_TABLE);
	if (!apptrust) {
		err = -EMSGSIZE;
		goto out;
	}

	for (i = 0; i < nselectors; i++) {
		type = dcbnl_app_attr_type_get(selectors[i]);
		err = nla_put_u8(skb, type, selectors[i]);
		if (err) {
			nla_nest_cancel(skb, apptrust);
			goto out;
		}
	}
	nla_nest_end(skb, apptrust);

out:
	kfree(selectors);
	return err;
}

/* Set or delete APP table or rewrite table entries. The APP struct is
 * validated and the appropriate callback function is called.
 */
static int dcbnl_app_table_setdel(struct nlattr *attr,
				  struct net_device *netdev,
				  int (*setdel)(struct net_device *dev,
						struct dcb_app *app))
{
	struct dcb_app *app_data;
	enum ieee_attrs_app type;
	struct nlattr *attr_itr;
	int rem, err;

	nla_for_each_nested(attr_itr, attr, rem) {
		type = nla_type(attr_itr);

		if (!dcbnl_app_attr_type_validate(type))
			continue;

		if (nla_len(attr_itr) < sizeof(struct dcb_app))
			return -ERANGE;

		app_data = nla_data(attr_itr);

		if (!dcbnl_app_selector_validate(type, app_data->selector))
			return -EINVAL;

		err = setdel(netdev, app_data);
		if (err)
			return err;
	}

	return 0;
}
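/*
 * Example (editor's sketch, not part of the original file): the setdel
 * callback handed to dcbnl_app_table_setdel() is either the driver's own
 * ieee_setapp/ieee_delapp hook or the generic dcb_ieee_setapp/dcb_ieee_delapp
 * fallback (see dcbnl_ieee_set() below). A hypothetical driver hook might
 * mirror the entry to hardware before recording it in the shared APP list;
 * foo_hw_map_app() is a made-up device-specific helper.
 */
#if 0	/* illustrative only */
static int foo_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	int err;

	err = foo_hw_map_app(dev, app->selector, app->protocol, app->priority);
	if (err)
		return err;

	/* keep the generic APP list in sync so dcbnl_ieee_fill() reports it */
	return dcb_ieee_setapp(dev, app);
}
#endif

/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands.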
*/ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee, *app, *rewr; struct dcb_app_type *itr; int dcbx; int err; if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) return -EMSGSIZE; ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE); if (!ieee) return -EMSGSIZE; if (ops->ieee_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) return -EMSGSIZE; } if (ops->ieee_getmaxrate) { struct ieee_maxrate maxrate; memset(&maxrate, 0, sizeof(maxrate)); err = ops->ieee_getmaxrate(netdev, &maxrate); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, sizeof(maxrate), &maxrate); if (err) return -EMSGSIZE; } } if (ops->ieee_getqcn) { struct ieee_qcn qcn; memset(&qcn, 0, sizeof(qcn)); err = ops->ieee_getqcn(netdev, &qcn); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_QCN, sizeof(qcn), &qcn); if (err) return -EMSGSIZE; } } if (ops->ieee_getqcnstats) { struct ieee_qcn_stats qcn_stats; memset(&qcn_stats, 0, sizeof(qcn_stats)); err = ops->ieee_getqcnstats(netdev, &qcn_stats); if (!err) { err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS, sizeof(qcn_stats), &qcn_stats); if (err) return -EMSGSIZE; } } if (ops->ieee_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) return -EMSGSIZE; } if (ops->dcbnl_getbuffer) { struct dcbnl_buffer buffer; memset(&buffer, 0, sizeof(buffer)); err = ops->dcbnl_getbuffer(netdev, &buffer); if (!err && nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer)) return -EMSGSIZE; } app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE); if (!app) return -EMSGSIZE; spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { enum ieee_attrs_app type = dcbnl_app_attr_type_get(itr->app.selector); err = nla_put(skb, type, sizeof(itr->app), &itr->app); if (err) { spin_unlock_bh(&dcb_lock); return -EMSGSIZE; } } } if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock_bh(&dcb_lock); nla_nest_end(skb, app); rewr = nla_nest_start(skb, DCB_ATTR_DCB_REWR_TABLE); if (!rewr) return -EMSGSIZE; spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_rewr_list, list) { if (itr->ifindex == netdev->ifindex) { enum ieee_attrs_app type = dcbnl_app_attr_type_get(itr->app.selector); err = nla_put(skb, type, sizeof(itr->app), &itr->app); if (err) { spin_unlock_bh(&dcb_lock); nla_nest_cancel(skb, rewr); return -EMSGSIZE; } } } spin_unlock_bh(&dcb_lock); nla_nest_end(skb, rewr); if (ops->dcbnl_getapptrust) { err = dcbnl_getapptrust(netdev, skb); if (err) return err; } /* get peer info if available */ if (ops->ieee_peer_getets) { struct ieee_ets ets; memset(&ets, 0, sizeof(ets)); err = ops->ieee_peer_getets(netdev, &ets); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) return -EMSGSIZE; } if (ops->ieee_peer_getpfc) { struct ieee_pfc pfc; memset(&pfc, 0, sizeof(pfc)); err = ops->ieee_peer_getpfc(netdev, &pfc); if (!err && nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) return -EMSGSIZE; } if (ops->peer_getappinfo && ops->peer_getapptable) { err = dcbnl_build_peer_app(netdev, skb, DCB_ATTR_IEEE_PEER_APP, DCB_ATTR_IEEE_APP_UNSPEC, DCB_ATTR_IEEE_APP); if (err) return -EMSGSIZE; } nla_nest_end(skb, ieee); if (dcbx >= 0) { err = nla_put_u8(skb, DCB_ATTR_DCBX, 
dcbx); if (err) return -EMSGSIZE; } return 0; } static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev, int dir) { u8 pgid, up_map, prio, tc_pct; const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops; int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG; struct nlattr *pg = nla_nest_start_noflag(skb, i); if (!pg) return -EMSGSIZE; for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) { struct nlattr *tc_nest = nla_nest_start_noflag(skb, i); if (!tc_nest) return -EMSGSIZE; pgid = DCB_ATTR_VALUE_UNDEFINED; prio = DCB_ATTR_VALUE_UNDEFINED; tc_pct = DCB_ATTR_VALUE_UNDEFINED; up_map = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); else ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, &prio, &pgid, &tc_pct, &up_map); if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) || nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct)) return -EMSGSIZE; nla_nest_end(skb, tc_nest); } for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) { tc_pct = DCB_ATTR_VALUE_UNDEFINED; if (!dir) ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); else ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, &tc_pct); if (nla_put_u8(skb, i, tc_pct)) return -EMSGSIZE; } nla_nest_end(skb, pg); return 0; } static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev) { struct nlattr *cee, *app; struct dcb_app_type *itr; const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; int dcbx, i, err = -EMSGSIZE; u8 value; if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name)) goto nla_put_failure; cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE); if (!cee) goto nla_put_failure; /* local pg */ if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) { err = dcbnl_cee_pg_fill(skb, netdev, 1); if (err) goto nla_put_failure; } if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) { err = dcbnl_cee_pg_fill(skb, netdev, 0); if (err) goto nla_put_failure; } /* local pfc */ if (ops->getpfccfg) { struct nlattr *pfc_nest = nla_nest_start_noflag(skb, DCB_ATTR_CEE_PFC); if (!pfc_nest) goto nla_put_failure; for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); if (nla_put_u8(skb, i, value)) goto nla_put_failure; } nla_nest_end(skb, pfc_nest); } /* local app */ spin_lock_bh(&dcb_lock); app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE); if (!app) goto dcb_unlock; list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == netdev->ifindex) { struct nlattr *app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP); if (!app_nest) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, itr->app.selector); if (err) goto dcb_unlock; err = nla_put_u16(skb, DCB_APP_ATTR_ID, itr->app.protocol); if (err) goto dcb_unlock; err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, itr->app.priority); if (err) goto dcb_unlock; nla_nest_end(skb, app_nest); } } nla_nest_end(skb, app); if (netdev->dcbnl_ops->getdcbx) dcbx = netdev->dcbnl_ops->getdcbx(netdev); else dcbx = -EOPNOTSUPP; spin_unlock_bh(&dcb_lock); /* features flags */ if (ops->getfeatcfg) { struct nlattr *feat = nla_nest_start_noflag(skb, DCB_ATTR_CEE_FEAT); if (!feat) goto nla_put_failure; for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) if (!ops->getfeatcfg(netdev, i, &value) && nla_put_u8(skb, i, value)) goto nla_put_failure; nla_nest_end(skb, feat); } /* peer info if available */ if (ops->cee_peer_getpg) { struct cee_pg pg; memset(&pg, 0, 
sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	err = -EMSGSIZE;
	return err;
}

static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 portid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
	if (!skb)
		return -ENOMEM;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_free(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}

int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);

int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
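/*
 * Example (editor's sketch, not part of the original file): a driver whose
 * DCB state changes outside of an RTM_SETDCB request (e.g. firmware finished
 * an LLDP/DCBX exchange) can use the exported notifiers above to refresh
 * RTNLGRP_DCB listeners. The event hook below is hypothetical.
 */
#if 0	/* illustrative only */
static void foo_fw_dcb_event(struct net_device *dev)
{
	/* seq and portid of 0: an unsolicited notification, not a reply */
	dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
}
#endif

/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation cannot be completed,
 * the entire message is aborted and an error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.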
*/ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; int prio; int err; if (!ops) return -EOPNOTSUPP; if (!tb[DCB_ATTR_IEEE]) return -EINVAL; err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE], dcbnl_ieee_policy, NULL); if (err) return err; if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) { struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]); err = ops->ieee_setets(netdev, ets); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) { struct ieee_maxrate *maxrate = nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]); err = ops->ieee_setmaxrate(netdev, maxrate); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) { struct ieee_qcn *qcn = nla_data(ieee[DCB_ATTR_IEEE_QCN]); err = ops->ieee_setqcn(netdev, qcn); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); err = ops->ieee_setpfc(netdev, pfc); if (err) goto err; } if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) { struct dcbnl_buffer *buffer = nla_data(ieee[DCB_ATTR_DCB_BUFFER]); for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) { if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) { err = -EINVAL; goto err; } } err = ops->dcbnl_setbuffer(netdev, buffer); if (err) goto err; } if (ieee[DCB_ATTR_DCB_REWR_TABLE]) { err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE], netdev, ops->dcbnl_setrewr ?: dcb_setrewr); if (err) goto err; } if (ieee[DCB_ATTR_IEEE_APP_TABLE]) { err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE], netdev, ops->ieee_setapp ?: dcb_ieee_setapp); if (err) goto err; } if (ieee[DCB_ATTR_DCB_APP_TRUST_TABLE]) { u8 selectors[IEEE_8021QAZ_APP_SEL_MAX + 1] = {0}; struct nlattr *attr; int nselectors = 0; int rem; if (!ops->dcbnl_setapptrust) { err = -EOPNOTSUPP; goto err; } nla_for_each_nested(attr, ieee[DCB_ATTR_DCB_APP_TRUST_TABLE], rem) { enum ieee_attrs_app type = nla_type(attr); u8 selector; int i; if (!dcbnl_app_attr_type_validate(type) || nla_len(attr) != 1 || nselectors >= sizeof(selectors)) { err = -EINVAL; goto err; } selector = nla_get_u8(attr); if (!dcbnl_app_selector_validate(type, selector)) { err = -EINVAL; goto err; } /* Duplicate selector ? 
*/
			for (i = 0; i < nselectors; i++) {
				if (selectors[i] == selector) {
					err = -EINVAL;
					goto err;
				}
			}

			selectors[nselectors++] = selector;
		}

		err = ops->dcbnl_setapptrust(netdev, selectors, nselectors);
		if (err)
			goto err;
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}

static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_ieee_fill(skb, netdev);
}

static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
					  tb[DCB_ATTR_IEEE],
					  dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		err = dcbnl_app_table_setdel(ieee[DCB_ATTR_IEEE_APP_TABLE],
					     netdev,
					     ops->ieee_delapp ?: dcb_ieee_delapp);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_DCB_REWR_TABLE]) {
		err = dcbnl_app_table_setdel(ieee[DCB_ATTR_DCB_REWR_TABLE],
					     netdev,
					     ops->dcbnl_delrewr ?: dcb_delrewr);
		if (err)
			goto err;
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}

/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getdcbx)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->getdcbx(netdev));
}

static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	value = nla_get_u8(tb[DCB_ATTR_DCBX]);

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->setdcbx(netdev, value));
}

static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
					  tb[DCB_ATTR_FEATCFG],
					  dcbnl_featcfg_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(skb, i, value);

		if (ret) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, nest);

nla_put_failure:
	return ret;
}

static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
	int ret, i;
	u8 value;

	if (!netdev->dcbnl_ops->setfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
					  tb[DCB_ATTR_FEATCFG],
					  dcbnl_featcfg_nest, NULL);

	if (ret)
		goto err;

	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value =
nla_get_u8(data[i]); ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value); if (ret) goto err; } err: ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret); return ret; } /* Handle CEE DCBX GET commands. */ static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh, u32 seq, struct nlattr **tb, struct sk_buff *skb) { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; if (!ops) return -EOPNOTSUPP; return dcbnl_cee_fill(skb, netdev); } struct reply_func { /* reply netlink message type */ int type; /* function to fill message contents */ int (*cb)(struct net_device *, struct nlmsghdr *, u32, struct nlattr **, struct sk_buff *); }; static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = { [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate }, [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate }, [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg }, [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg }, [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr }, [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap }, [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs }, [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs }, [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate }, [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate }, [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp }, [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp }, [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg }, [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg }, [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg }, [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg }, [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall }, [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg }, [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg }, [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get }, [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set }, [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del }, [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx }, [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx }, [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg }, [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg }, [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get }, }; static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct net_device *netdev; struct dcbmsg *dcb = nlmsg_data(nlh); struct nlattr *tb[DCB_ATTR_MAX + 1]; u32 portid = NETLINK_CB(skb).portid; int ret = -EINVAL; struct sk_buff *reply_skb; struct nlmsghdr *reply_nlh = NULL; const struct reply_func *fn; if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX, dcbnl_rtnl_policy, extack); if (ret < 0) return ret; if (dcb->cmd > DCB_CMD_MAX) return -EINVAL; /* check if a reply function has been defined for the command */ fn = &reply_funcs[dcb->cmd]; if (!fn->cb) return -EOPNOTSUPP; if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (!tb[DCB_ATTR_IFNAME]) return -EINVAL; netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME])); if (!netdev) return -ENODEV; if (!netdev->dcbnl_ops) return -EOPNOTSUPP; reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq, nlh->nlmsg_flags, &reply_nlh); if (!reply_skb) return -ENOMEM; ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb); if (ret < 0) { nlmsg_free(reply_skb); goto out; } nlmsg_end(reply_skb, reply_nlh); ret = rtnl_unicast(reply_skb, net, portid); out: return ret; } static struct dcb_app_type 
*dcb_rewr_lookup(const struct dcb_app *app,
					       int ifindex, int proto)
{
	struct dcb_app_type *itr;

	list_for_each_entry(itr, &dcb_rewr_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.priority == app->priority &&
		    itr->ifindex == ifindex &&
		    ((proto == -1) || itr->app.protocol == proto))
			return itr;
	}

	return NULL;
}

static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
					   int ifindex, int prio)
{
	struct dcb_app_type *itr;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == ifindex &&
		    ((prio == -1) || itr->app.priority == prio))
			return itr;
	}

	return NULL;
}

static int dcb_app_add(struct list_head *list, const struct dcb_app *app,
		       int ifindex)
{
	struct dcb_app_type *entry;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	memcpy(&entry->app, app, sizeof(*app));
	entry->ifindex = ifindex;
	list_add(&entry->list, list);

	return 0;
}

/**
 * dcb_getapp - retrieve the DCBX application user priority
 * @dev: network interface
 * @app: application to get user priority of
 *
 * On success returns a non-zero 802.1p user priority bitmap;
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, -1);
	if (itr)
		prio = itr->app.priority;
	spin_unlock_bh(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_getapp);

/**
 * dcb_setapp - add CEE dcb application data to app list
 * @dev: network interface
 * @new: application data to add
 *
 * Priority 0 is an invalid priority in the CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero. Priority is expected to be an 8-bit 802.1p user
 * priority bitmap.
 */
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and replace */
	itr = dcb_app_lookup(new, dev->ifindex, -1);
	if (itr) {
		if (new->priority)
			itr->app.priority = new->priority;
		else {
			list_del(&itr->list);
			kfree(itr);
		}
		goto out;
	}
	/* App type does not exist, add new application type */
	if (new->priority)
		err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);

	return err;
}
EXPORT_SYMBOL(dcb_setapp);

/**
 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
 * @dev: network interface
 * @app: where to store the retrieved application data
 *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap, otherwise returns 0 to indicate the dcb_app was
 * not found in the APP list.
 */
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, -1);
	if (itr)
		prio |= 1 << itr->app.priority;
	spin_unlock_bh(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);
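/*
 * Example (editor's sketch, not part of the original file): CEE APP entries
 * are keyed by (selector, protocol) and store an 802.1p user priority
 * bitmap; writing priority 0 deletes the entry. A hypothetical caller
 * mapping FCoE (ethertype 0x8906) to user priority 3:
 */
#if 0	/* illustrative only */
static int foo_map_fcoe(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = 0x8906,	/* FCoE ethertype */
		.priority = 1 << 3,	/* bitmap: user priority 3 */
	};
	int err;

	err = dcb_setapp(dev, &app);
	if (err)
		return err;

	/* dcb_getapp() reads the bitmap back, or 0 if no entry exists */
	return dcb_getapp(dev, &app) ? 0 : -ENOENT;
}
#endif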
/* Get protocol value from rewrite entry. */
u16 dcb_getrewr(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u16 proto = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_rewr_lookup(app, dev->ifindex, -1);
	if (itr)
		proto = itr->app.protocol;
	spin_unlock_bh(&dcb_lock);

	return proto;
}
EXPORT_SYMBOL(dcb_getrewr);

/* Add rewrite entry to the rewrite list. */
int dcb_setrewr(struct net_device *dev, struct dcb_app *new)
{
	int err;

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and abort if found. */
	if (dcb_rewr_lookup(new, dev->ifindex, new->protocol)) {
		err = -EEXIST;
		goto out;
	}

	err = dcb_app_add(&dcb_rewr_list, new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);

	return err;
}
EXPORT_SYMBOL(dcb_setrewr);

/* Delete rewrite entry from the rewrite list. */
int dcb_delrewr(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	int err = -ENOENT;

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and remove it. */
	itr = dcb_rewr_lookup(del, dev->ifindex, del->protocol);
	if (itr) {
		list_del(&itr->list);
		kfree(itr);
		err = 0;
	}

	spin_unlock_bh(&dcb_lock);

	return err;
}
EXPORT_SYMBOL(dcb_delrewr);

/**
 * dcb_ieee_setapp - add IEEE dcb application data to app list
 * @dev: network interface
 * @new: application data to add
 *
 * This adds application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different. Priority is expected to be a
 * 3-bit unsigned integer.
 */
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and abort if found */
	if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
		err = -EEXIST;
		goto out;
	}

	err = dcb_app_add(&dcb_app_list, new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);

	return err;
}
EXPORT_SYMBOL(dcb_ieee_setapp);

/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 * @dev: network interface
 * @del: application data to delete
 *
 * This removes a matching APP data from the APP list.
 */
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = -ENOENT;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, del, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and remove it. */
	itr = dcb_app_lookup(del, dev->ifindex, del->priority);
	if (itr) {
		list_del(&itr->list);
		kfree(itr);
		err = 0;
	}

	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);

	return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);
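/*
 * Example (editor's sketch, not part of the original file): IEEE APP entries
 * carry a plain 3-bit priority instead of a bitmap, and entries differing
 * only in priority may coexist. A hypothetical helper mapping DSCP 46 (EF)
 * to priority 5 and removing the mapping again:
 */
#if 0	/* illustrative only */
static int foo_map_dscp_ef(struct net_device *dev, bool add)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = 46,		/* DSCP EF */
		.priority = 5,
	};

	return add ? dcb_ieee_setapp(dev, &app) : dcb_ieee_delapp(dev, &app);
}
#endif

/* dcb_getrewr_prio_pcp_mask_map - For a given device, find mapping from
 * priorities to the PCP and DEI values assigned to that priority.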
*/ void dcb_getrewr_prio_pcp_mask_map(const struct net_device *dev, struct dcb_rewr_prio_pcp_map *p_map) { int ifindex = dev->ifindex; struct dcb_app_type *itr; u8 prio; memset(p_map->map, 0, sizeof(p_map->map)); spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_rewr_list, list) { if (itr->ifindex == ifindex && itr->app.selector == DCB_APP_SEL_PCP && itr->app.protocol < 16 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) { prio = itr->app.priority; p_map->map[prio] |= 1 << itr->app.protocol; } } spin_unlock_bh(&dcb_lock); } EXPORT_SYMBOL(dcb_getrewr_prio_pcp_mask_map); /* dcb_getrewr_prio_dscp_mask_map - For a given device, find mapping from * priorities to the DSCP values assigned to that priority. */ void dcb_getrewr_prio_dscp_mask_map(const struct net_device *dev, struct dcb_ieee_app_prio_map *p_map) { int ifindex = dev->ifindex; struct dcb_app_type *itr; u8 prio; memset(p_map->map, 0, sizeof(p_map->map)); spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_rewr_list, list) { if (itr->ifindex == ifindex && itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP && itr->app.protocol < 64 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) { prio = itr->app.priority; p_map->map[prio] |= 1ULL << itr->app.protocol; } } spin_unlock_bh(&dcb_lock); } EXPORT_SYMBOL(dcb_getrewr_prio_dscp_mask_map); /* * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from * priorities to the DSCP values assigned to that priority. Initialize p_map * such that each map element holds a bit mask of DSCP values configured for * that priority by APP entries. */ void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev, struct dcb_ieee_app_prio_map *p_map) { int ifindex = dev->ifindex; struct dcb_app_type *itr; u8 prio; memset(p_map->map, 0, sizeof(p_map->map)); spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == ifindex && itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP && itr->app.protocol < 64 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) { prio = itr->app.priority; p_map->map[prio] |= 1ULL << itr->app.protocol; } } spin_unlock_bh(&dcb_lock); } EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map); /* * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from * DSCP values to the priorities assigned to that DSCP value. Initialize p_map * such that each map element holds a bit mask of priorities configured for a * given DSCP value by APP entries. */ void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev, struct dcb_ieee_app_dscp_map *p_map) { int ifindex = dev->ifindex; struct dcb_app_type *itr; memset(p_map->map, 0, sizeof(p_map->map)); spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == ifindex && itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP && itr->app.protocol < 64 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) p_map->map[itr->app.protocol] |= 1 << itr->app.priority; } spin_unlock_bh(&dcb_lock); } EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map); /* * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet * type, with valid PID values >= 1536. A special meaning is then assigned to * protocol value of 0: "default priority. For use when priority is not * otherwise specified". * * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default * priorities set by these entries. 
*/ u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev) { int ifindex = dev->ifindex; struct dcb_app_type *itr; u8 mask = 0; spin_lock_bh(&dcb_lock); list_for_each_entry(itr, &dcb_app_list, list) { if (itr->ifindex == ifindex && itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && itr->app.protocol == 0 && itr->app.priority < IEEE_8021QAZ_MAX_TCS) mask |= 1 << itr->app.priority; } spin_unlock_bh(&dcb_lock); return mask; } EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask); static void dcbnl_flush_dev(struct net_device *dev) { struct dcb_app_type *itr, *tmp; spin_lock_bh(&dcb_lock); list_for_each_entry_safe(itr, tmp, &dcb_app_list, list) { if (itr->ifindex == dev->ifindex) { list_del(&itr->list); kfree(itr); } } spin_unlock_bh(&dcb_lock); } static int dcbnl_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_UNREGISTER: if (!dev->dcbnl_ops) return NOTIFY_DONE; dcbnl_flush_dev(dev); return NOTIFY_OK; default: return NOTIFY_DONE; } } static struct notifier_block dcbnl_nb __read_mostly = { .notifier_call = dcbnl_netdevice_event, }; static const struct rtnl_msg_handler dcbnl_rtnl_msg_handlers[] __initconst = { {.msgtype = RTM_GETDCB, .doit = dcb_doit}, {.msgtype = RTM_SETDCB, .doit = dcb_doit}, }; static int __init dcbnl_init(void) { int err; err = register_netdevice_notifier(&dcbnl_nb); if (err) return err; rtnl_register_many(dcbnl_rtnl_msg_handlers); return 0; } device_initcall(dcbnl_init);
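/*
 * Example (editor's sketch, not part of the original file): consuming the
 * mask-map helpers above. Each map[] element holds a bitmask of DSCP values
 * configured for that priority, so a driver resyncing its DSCP-to-priority
 * table might walk the map as below; foo_hw_set_dscp_prio() is made up.
 */
#if 0	/* illustrative only */
static void foo_sync_dscp_map(struct net_device *dev)
{
	struct dcb_ieee_app_prio_map map;
	int prio, dscp;

	dcb_ieee_getapp_prio_dscp_mask_map(dev, &map);

	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		for (dscp = 0; dscp < 64; dscp++)
			if (map.map[prio] & (1ULL << dscp))
				foo_hw_set_dscp_prio(dev, dscp, prio);
}
#endif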
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux I2C core ACPI support code
 *
 * Copyright (C) 2014 Intel Corp, Author: Lan Tianyu <tianyu.lan@intel.com>
 */

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "i2c-core.h"
"i2c-core.h" struct i2c_acpi_handler_data { struct acpi_connection_info info; struct i2c_adapter *adapter; }; struct gsb_buffer { u8 status; u8 len; union { u16 wdata; u8 bdata; DECLARE_FLEX_ARRAY(u8, data); }; } __packed; struct i2c_acpi_lookup { struct i2c_board_info *info; acpi_handle adapter_handle; acpi_handle device_handle; acpi_handle search_handle; int n; int index; u32 speed; u32 min_speed; u32 force_speed; }; /** * i2c_acpi_get_i2c_resource - Gets I2cSerialBus resource if type matches * @ares: ACPI resource * @i2c: Pointer to I2cSerialBus resource will be returned here * * Checks if the given ACPI resource is of type I2cSerialBus. * In this case, returns a pointer to it to the caller. * * Returns true if resource type is of I2cSerialBus, otherwise false. */ bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, struct acpi_resource_i2c_serialbus **i2c) { struct acpi_resource_i2c_serialbus *sb; if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) return false; sb = &ares->data.i2c_serial_bus; if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) return false; *i2c = sb; return true; } EXPORT_SYMBOL_GPL(i2c_acpi_get_i2c_resource); static int i2c_acpi_resource_count(struct acpi_resource *ares, void *data) { struct acpi_resource_i2c_serialbus *sb; int *count = data; if (i2c_acpi_get_i2c_resource(ares, &sb)) *count = *count + 1; return 1; } /** * i2c_acpi_client_count - Count the number of I2cSerialBus resources * @adev: ACPI device * * Returns the number of I2cSerialBus resources in the ACPI-device's * resource-list; or a negative error code. */ int i2c_acpi_client_count(struct acpi_device *adev) { int ret, count = 0; LIST_HEAD(r); ret = acpi_dev_get_resources(adev, &r, i2c_acpi_resource_count, &count); if (ret < 0) return ret; acpi_dev_free_resource_list(&r); return count; } EXPORT_SYMBOL_GPL(i2c_acpi_client_count); static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data) { struct i2c_acpi_lookup *lookup = data; struct i2c_board_info *info = lookup->info; struct acpi_resource_i2c_serialbus *sb; acpi_status status; if (info->addr || !i2c_acpi_get_i2c_resource(ares, &sb)) return 1; if (lookup->index != -1 && lookup->n++ != lookup->index) return 1; status = acpi_get_handle(lookup->device_handle, sb->resource_source.string_ptr, &lookup->adapter_handle); if (ACPI_FAILURE(status)) return 1; info->addr = sb->slave_address; lookup->speed = sb->connection_speed; if (sb->access_mode == ACPI_I2C_10BIT_MODE) info->flags |= I2C_CLIENT_TEN; return 1; } static const struct acpi_device_id i2c_acpi_ignored_device_ids[] = { /* * ACPI video acpi_devices, which are handled by the acpi-video driver * sometimes contain a SERIAL_TYPE_I2C ACPI resource, ignore these. 
static const struct acpi_device_id i2c_acpi_ignored_device_ids[] = {
	/*
	 * ACPI video acpi_devices, which are handled by the acpi-video
	 * driver, sometimes contain a SERIAL_TYPE_I2C ACPI resource; ignore
	 * these.
	 */
	{ ACPI_VIDEO_HID, 0 },
	{}
};

struct i2c_acpi_irq_context {
	int irq;
	bool wake_capable;
};

static int i2c_acpi_do_lookup(struct acpi_device *adev,
			      struct i2c_acpi_lookup *lookup)
{
	struct i2c_board_info *info = lookup->info;
	struct list_head resource_list;
	int ret;

	if (acpi_bus_get_status(adev))
		return -EINVAL;

	if (!acpi_dev_ready_for_enumeration(adev))
		return -ENODEV;

	if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0)
		return -ENODEV;

	memset(info, 0, sizeof(*info));
	lookup->device_handle = acpi_device_handle(adev);

	/* Look up the I2cSerialBus resource */
	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list,
				     i2c_acpi_fill_info, lookup);
	acpi_dev_free_resource_list(&resource_list);

	if (ret < 0 || !info->addr)
		return -EINVAL;

	return 0;
}

static int i2c_acpi_add_irq_resource(struct acpi_resource *ares, void *data)
{
	struct i2c_acpi_irq_context *irq_ctx = data;
	struct resource r;

	if (irq_ctx->irq > 0)
		return 1;

	if (!acpi_dev_resource_interrupt(ares, 0, &r))
		return 1;

	irq_ctx->irq = i2c_dev_irq_from_resources(&r, 1);
	irq_ctx->wake_capable = r.flags & IORESOURCE_IRQ_WAKECAPABLE;

	return 1; /* No need to add resource to the list */
}

/**
 * i2c_acpi_get_irq - get device IRQ number from ACPI
 * @client: Pointer to the I2C client device
 * @wake_capable: Set to true if the IRQ is wake capable
 *
 * Find the IRQ number used by a specific client device.
 *
 * Return: The IRQ number or an error code.
 */
int i2c_acpi_get_irq(struct i2c_client *client, bool *wake_capable)
{
	struct acpi_device *adev = ACPI_COMPANION(&client->dev);
	struct list_head resource_list;
	struct i2c_acpi_irq_context irq_ctx = {
		.irq = -ENOENT,
	};
	int ret;

	INIT_LIST_HEAD(&resource_list);

	ret = acpi_dev_get_resources(adev, &resource_list,
				     i2c_acpi_add_irq_resource, &irq_ctx);
	if (ret < 0)
		return ret;

	acpi_dev_free_resource_list(&resource_list);

	if (irq_ctx.irq == -ENOENT)
		irq_ctx.irq = acpi_dev_gpio_irq_wake_get(adev, 0,
							 &irq_ctx.wake_capable);

	if (irq_ctx.irq < 0)
		return irq_ctx.irq;

	if (wake_capable)
		*wake_capable = irq_ctx.wake_capable;

	return irq_ctx.irq;
}

static int i2c_acpi_get_info(struct acpi_device *adev,
			     struct i2c_board_info *info,
			     struct i2c_adapter *adapter,
			     acpi_handle *adapter_handle)
{
	struct i2c_acpi_lookup lookup;
	int ret;

	memset(&lookup, 0, sizeof(lookup));
	lookup.info = info;
	lookup.index = -1;

	if (acpi_device_enumerated(adev))
		return -EINVAL;

	ret = i2c_acpi_do_lookup(adev, &lookup);
	if (ret)
		return ret;

	if (adapter) {
		/* The adapter must match the one in I2cSerialBus() connector */
		if (ACPI_HANDLE(&adapter->dev) != lookup.adapter_handle)
			return -ENODEV;
	} else {
		struct acpi_device *adapter_adev;

		/* The adapter must be present */
		adapter_adev = acpi_fetch_acpi_dev(lookup.adapter_handle);
		if (!adapter_adev)
			return -ENODEV;
		if (acpi_bus_get_status(adapter_adev) ||
		    !adapter_adev->status.present)
			return -ENODEV;
	}

	info->fwnode = acpi_fwnode_handle(adev);
	if (adapter_handle)
		*adapter_handle = lookup.adapter_handle;

	acpi_set_modalias(adev, dev_name(&adev->dev), info->type,
			  sizeof(info->type));

	return 0;
}
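/*
 * Example (editor's sketch, not part of the original file): a client driver
 * that needs the wake capability along with the IRQ can call
 * i2c_acpi_get_irq() directly; the probe function below is hypothetical.
 */
#if 0	/* illustrative only */
static int foo_probe(struct i2c_client *client)
{
	bool wake_capable;
	int irq;

	irq = i2c_acpi_get_irq(client, &wake_capable);
	if (irq < 0)
		return irq;

	/* request the IRQ; use wake_capable to decide on enable_irq_wake() */
	return 0;
}
#endif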
static void i2c_acpi_register_device(struct i2c_adapter *adapter,
				     struct acpi_device *adev,
				     struct i2c_board_info *info)
{
	/*
	 * Skip registration on boards where the ACPI tables are
	 * known to contain bogus I2C devices.
	 */
	if (acpi_quirk_skip_i2c_client_enumeration(adev))
		return;

	adev->power.flags.ignore_parent = true;
	acpi_device_set_enumerated(adev);

	if (IS_ERR(i2c_new_client_device(adapter, info)))
		adev->power.flags.ignore_parent = false;
}

static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
				       void *data, void **return_value)
{
	struct i2c_adapter *adapter = data;
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
	struct i2c_board_info info;

	if (!adev || i2c_acpi_get_info(adev, &info, adapter, NULL))
		return AE_OK;

	i2c_acpi_register_device(adapter, adev, &info);

	return AE_OK;
}

#define I2C_ACPI_MAX_SCAN_DEPTH 32

/**
 * i2c_acpi_register_devices - enumerate I2C slave devices behind adapter
 * @adap: pointer to adapter
 *
 * Enumerate all I2C slave devices behind this adapter by walking the ACPI
 * namespace. When a device is found it will be added to the Linux device
 * model and bound to the corresponding ACPI handle.
 */
void i2c_acpi_register_devices(struct i2c_adapter *adap)
{
	struct acpi_device *adev;
	acpi_status status;

	if (!has_acpi_companion(&adap->dev))
		return;

	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
				     I2C_ACPI_MAX_SCAN_DEPTH,
				     i2c_acpi_add_device, NULL,
				     adap, NULL);
	if (ACPI_FAILURE(status))
		dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");

	if (!adap->dev.parent)
		return;

	adev = ACPI_COMPANION(adap->dev.parent);
	if (!adev)
		return;

	acpi_dev_clear_dependencies(adev);
}

static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = {
	/*
	 * These Silead touchscreen controllers only work at 400 kHz; for
	 * some reason they do not work at 100 kHz. On some devices the ACPI
	 * tables list another device at their bus as only being capable
	 * of 100 kHz, but testing has shown that these other devices work
	 * fine at 400 kHz (as can be expected of any recent I2C hardware),
	 * so we force the speed of the bus to 400 kHz if a Silead device is
	 * present.
	 */
	{ "MSSL1680", 0 },
	{}
};

static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
					 void *data, void **return_value)
{
	struct i2c_acpi_lookup *lookup = data;
	struct acpi_device *adev = acpi_fetch_acpi_dev(handle);

	if (!adev || i2c_acpi_do_lookup(adev, lookup))
		return AE_OK;

	if (lookup->search_handle != lookup->adapter_handle)
		return AE_OK;

	if (lookup->speed <= lookup->min_speed)
		lookup->min_speed = lookup->speed;

	if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0)
		lookup->force_speed = I2C_MAX_FAST_MODE_FREQ;

	return AE_OK;
}

/**
 * i2c_acpi_find_bus_speed - find I2C bus speed from ACPI
 * @dev: The device owning the bus
 *
 * Find the I2C bus speed by walking the ACPI namespace for all I2C slave
 * devices connected to this bus and use the speed of the slowest device.
* * Returns the speed in Hz or zero */ u32 i2c_acpi_find_bus_speed(struct device *dev) { struct i2c_acpi_lookup lookup; struct i2c_board_info dummy; acpi_status status; if (!has_acpi_companion(dev)) return 0; memset(&lookup, 0, sizeof(lookup)); lookup.search_handle = ACPI_HANDLE(dev); lookup.min_speed = UINT_MAX; lookup.info = &dummy; lookup.index = -1; status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, I2C_ACPI_MAX_SCAN_DEPTH, i2c_acpi_lookup_speed, NULL, &lookup, NULL); if (ACPI_FAILURE(status)) { dev_warn(dev, "unable to find I2C bus speed from ACPI\n"); return 0; } if (lookup.force_speed) { if (lookup.force_speed != lookup.min_speed) dev_warn(dev, FW_BUG "DSDT uses known not-working I2C bus speed %d, forcing it to %d\n", lookup.min_speed, lookup.force_speed); return lookup.force_speed; } else if (lookup.min_speed != UINT_MAX) { return lookup.min_speed; } else { return 0; } } EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) { struct i2c_adapter *adapter; struct device *dev; dev = bus_find_device(&i2c_bus_type, NULL, handle, device_match_acpi_handle); if (!dev) return NULL; adapter = i2c_verify_adapter(dev); if (!adapter) put_device(dev); return adapter; } EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle); static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) { return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev)); } static struct i2c_adapter *i2c_acpi_find_adapter_by_adev(struct acpi_device *adev) { return i2c_find_adapter_by_fwnode(acpi_fwnode_handle(adev)); } static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, void *arg) { struct acpi_device *adev = arg; struct i2c_board_info info; acpi_handle adapter_handle; struct i2c_adapter *adapter; struct i2c_client *client; switch (value) { case ACPI_RECONFIG_DEVICE_ADD: if (i2c_acpi_get_info(adev, &info, NULL, &adapter_handle)) break; adapter = i2c_acpi_find_adapter_by_handle(adapter_handle); if (!adapter) break; i2c_acpi_register_device(adapter, adev, &info); put_device(&adapter->dev); break; case ACPI_RECONFIG_DEVICE_REMOVE: if (!acpi_device_enumerated(adev)) break; client = i2c_acpi_find_client_by_adev(adev); if (client) { i2c_unregister_device(client); put_device(&client->dev); } adapter = i2c_acpi_find_adapter_by_adev(adev); if (adapter) { acpi_unbind_one(&adapter->dev); put_device(&adapter->dev); } break; } return NOTIFY_OK; } struct notifier_block i2c_acpi_notifier = { .notifier_call = i2c_acpi_notify, }; /** * i2c_acpi_new_device_by_fwnode - Create i2c-client for the Nth I2cSerialBus resource * @fwnode: fwnode with the ACPI resources to get the client from * @index: Index of ACPI resource to get * @info: describes the I2C device; note this is modified (addr gets set) * Context: can sleep * * By default the i2c subsys creates an i2c-client for the first I2cSerialBus * resource of an acpi_device, but some acpi_devices have multiple I2cSerialBus * resources, in that case this function can be used to create an i2c-client * for other I2cSerialBus resources in the Current Resource Settings table. * * Also see i2c_new_client_device, which this function calls to create the * i2c-client. * * Returns a pointer to the new i2c-client, or error pointer in case of failure. * Specifically, -EPROBE_DEFER is returned if the adapter is not found. 
*/
struct i2c_client *i2c_acpi_new_device_by_fwnode(struct fwnode_handle *fwnode,
						 int index,
						 struct i2c_board_info *info)
{
	struct i2c_acpi_lookup lookup;
	struct i2c_adapter *adapter;
	struct acpi_device *adev;
	LIST_HEAD(resource_list);
	int ret;

	adev = to_acpi_device_node(fwnode);
	if (!adev)
		return ERR_PTR(-ENODEV);

	memset(&lookup, 0, sizeof(lookup));
	lookup.info = info;
	lookup.device_handle = acpi_device_handle(adev);
	lookup.index = index;

	ret = acpi_dev_get_resources(adev, &resource_list,
				     i2c_acpi_fill_info, &lookup);
	if (ret < 0)
		return ERR_PTR(ret);

	acpi_dev_free_resource_list(&resource_list);

	if (!info->addr)
		return ERR_PTR(-EADDRNOTAVAIL);

	adapter = i2c_acpi_find_adapter_by_handle(lookup.adapter_handle);
	if (!adapter)
		return ERR_PTR(-EPROBE_DEFER);

	return i2c_new_client_device(adapter, info);
}
EXPORT_SYMBOL_GPL(i2c_acpi_new_device_by_fwnode);

bool i2c_acpi_waive_d0_probe(struct device *dev)
{
	struct i2c_driver *driver = to_i2c_driver(dev->driver);
	struct acpi_device *adev = ACPI_COMPANION(dev);

	return driver->flags & I2C_DRV_ACPI_WAIVE_D0_PROBE &&
		adev && adev->power.state_for_enumeration >= adev->power.state;
}
EXPORT_SYMBOL_GPL(i2c_acpi_waive_d0_probe);

#ifdef CONFIG_ACPI_I2C_OPREGION
static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
		u8 cmd, u8 *data, u8 data_len)
{
	struct i2c_msg msgs[2];
	int ret;
	u8 *buffer;

	buffer = kzalloc(data_len, GFP_KERNEL);
	if (!buffer)
		return AE_NO_MEMORY;

	msgs[0].addr = client->addr;
	msgs[0].flags = client->flags;
	msgs[0].len = 1;
	msgs[0].buf = &cmd;

	msgs[1].addr = client->addr;
	msgs[1].flags = client->flags | I2C_M_RD;
	msgs[1].len = data_len;
	msgs[1].buf = buffer;

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
	if (ret < 0) {
		/* Getting a NACK is unfortunately normal with some DSDTs */
		if (ret == -EREMOTEIO)
			dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
				data_len, client->addr, cmd, ret);
		else
			dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
				data_len, client->addr, cmd, ret);
	/* 2 transfers must have completed successfully */
	} else if (ret == 2) {
		memcpy(data, buffer, data_len);
		ret = 0;
	} else {
		ret = -EIO;
	}

	kfree(buffer);
	return ret;
}

static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
		u8 cmd, u8 *data, u8 data_len)
{
	struct i2c_msg msgs[1];
	u8 *buffer;
	int ret = AE_OK;

	buffer = kzalloc(data_len + 1, GFP_KERNEL);
	if (!buffer)
		return AE_NO_MEMORY;

	buffer[0] = cmd;
	memcpy(buffer + 1, data, data_len);

	msgs[0].addr = client->addr;
	msgs[0].flags = client->flags;
	msgs[0].len = data_len + 1;
	msgs[0].buf = buffer;

	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

	kfree(buffer);

	if (ret < 0) {
		dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret);
		return ret;
	}

	/* 1 transfer must have completed successfully */
	return (ret == 1) ?
0 : -EIO; } static acpi_status i2c_acpi_space_handler(u32 function, acpi_physical_address command, u32 bits, u64 *value64, void *handler_context, void *region_context) { struct gsb_buffer *gsb = (struct gsb_buffer *)value64; struct i2c_acpi_handler_data *data = handler_context; struct acpi_connection_info *info = &data->info; struct acpi_resource_i2c_serialbus *sb; struct i2c_adapter *adapter = data->adapter; struct i2c_client *client; struct acpi_resource *ares; u32 accessor_type = function >> 16; u8 action = function & ACPI_IO_MASK; acpi_status ret; int status; ret = acpi_buffer_to_resource(info->connection, info->length, &ares); if (ACPI_FAILURE(ret)) return ret; client = kzalloc(sizeof(*client), GFP_KERNEL); if (!client) { ret = AE_NO_MEMORY; goto err; } if (!value64 || !i2c_acpi_get_i2c_resource(ares, &sb)) { ret = AE_BAD_PARAMETER; goto err; } client->adapter = adapter; client->addr = sb->slave_address; if (sb->access_mode == ACPI_I2C_10BIT_MODE) client->flags |= I2C_CLIENT_TEN; switch (accessor_type) { case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: if (action == ACPI_READ) { status = i2c_smbus_read_byte(client); if (status >= 0) { gsb->bdata = status; status = 0; } } else { status = i2c_smbus_write_byte(client, gsb->bdata); } break; case ACPI_GSB_ACCESS_ATTRIB_BYTE: if (action == ACPI_READ) { status = i2c_smbus_read_byte_data(client, command); if (status >= 0) { gsb->bdata = status; status = 0; } } else { status = i2c_smbus_write_byte_data(client, command, gsb->bdata); } break; case ACPI_GSB_ACCESS_ATTRIB_WORD: if (action == ACPI_READ) { status = i2c_smbus_read_word_data(client, command); if (status >= 0) { gsb->wdata = status; status = 0; } } else { status = i2c_smbus_write_word_data(client, command, gsb->wdata); } break; case ACPI_GSB_ACCESS_ATTRIB_BLOCK: if (action == ACPI_READ) { status = i2c_smbus_read_block_data(client, command, gsb->data); if (status >= 0) { gsb->len = status; status = 0; } } else { status = i2c_smbus_write_block_data(client, command, gsb->len, gsb->data); } break; case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: if (action == ACPI_READ) { status = acpi_gsb_i2c_read_bytes(client, command, gsb->data, info->access_length); } else { status = acpi_gsb_i2c_write_bytes(client, command, gsb->data, info->access_length); } break; default: dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n", accessor_type, client->addr); ret = AE_BAD_PARAMETER; goto err; } gsb->status = status; err: kfree(client); ACPI_FREE(ares); return ret; } int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) { acpi_handle handle; struct i2c_acpi_handler_data *data; acpi_status status; if (!adapter->dev.parent) return -ENODEV; handle = ACPI_HANDLE(adapter->dev.parent); if (!handle) return -ENODEV; data = kzalloc(sizeof(struct i2c_acpi_handler_data), GFP_KERNEL); if (!data) return -ENOMEM; data->adapter = adapter; status = acpi_bus_attach_private_data(handle, (void *)data); if (ACPI_FAILURE(status)) { kfree(data); return -ENOMEM; } status = acpi_install_address_space_handler(handle, ACPI_ADR_SPACE_GSBUS, &i2c_acpi_space_handler, NULL, data); if (ACPI_FAILURE(status)) { dev_err(&adapter->dev, "Error installing i2c space handler\n"); acpi_bus_detach_private_data(handle); kfree(data); return -ENOMEM; } return 0; } void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter) { acpi_handle handle; struct i2c_acpi_handler_data *data; acpi_status status; if (!adapter->dev.parent) return; handle = ACPI_HANDLE(adapter->dev.parent); if (!handle) return; 
	acpi_remove_address_space_handler(handle,
				ACPI_ADR_SPACE_GSBUS,
				&i2c_acpi_space_handler);

	status = acpi_bus_get_private_data(handle, (void **)&data);
	if (ACPI_SUCCESS(status))
		kfree(data);

	acpi_bus_detach_private_data(handle);
}
#endif /* CONFIG_ACPI_I2C_OPREGION */
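As a usage note for the section above: i2c_acpi_new_device_by_fwnode() is the hook a driver reaches for when an ACPI node exposes more than one I2cSerialBus resource. The following is only a minimal sketch of that call from a driver probe path; the function name my_probe_second_client and the board-info type "my_second_function" are hypothetical.

/*
 * Hedged sketch: instantiate a client for the second I2cSerialBus resource
 * (index 1) of the device's ACPI node. Names marked above are hypothetical.
 */
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/property.h>

static int my_probe_second_client(struct device *dev)
{
	struct i2c_board_info info = { .type = "my_second_function" };
	struct i2c_client *client;

	/* index 1 selects the second I2cSerialBus entry in _CRS */
	client = i2c_acpi_new_device_by_fwnode(dev_fwnode(dev), 1, &info);
	if (IS_ERR(client))
		return PTR_ERR(client);	/* -EPROBE_DEFER if the adapter is not up yet */

	/* ... use client; release later with i2c_unregister_device(client) ... */
	return 0;
}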
295 262 250 249 244 266 12 5 262 2 250 250 250 250 250 249 174 309 261 31 285 250 253 247 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 /* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __SOUND_PCM_PARAMS_H #define __SOUND_PCM_PARAMS_H /* * PCM params helpers * Copyright (c) by Abramo Bagnara <abramo@alsa-project.org> */ #include <sound/pcm.h> int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir); int snd_pcm_hw_param_last(struct snd_pcm_substream *pcm, struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir); int snd_pcm_hw_param_value(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var, int *dir); #define SNDRV_MASK_BITS 64 /* we use so far 64bits only */ #define SNDRV_MASK_SIZE (SNDRV_MASK_BITS / 32) #define MASK_OFS(i) ((i) >> 5) #define MASK_BIT(i) (1U << ((i) & 31)) static inline void snd_mask_none(struct snd_mask *mask) { memset(mask, 0, sizeof(*mask)); } static inline void snd_mask_any(struct snd_mask *mask) { memset(mask, 0xff, SNDRV_MASK_SIZE * sizeof(u_int32_t)); } static inline int snd_mask_empty(const struct snd_mask *mask) { int i; for (i = 0; i < SNDRV_MASK_SIZE; i++) if (mask->bits[i]) return 0; return 1; } static inline unsigned int snd_mask_min(const struct snd_mask *mask) { int i; for (i = 0; i < SNDRV_MASK_SIZE; i++) { if (mask->bits[i]) return __ffs(mask->bits[i]) + (i << 5); } return 0; } static inline unsigned int snd_mask_max(const struct snd_mask *mask) { int i; for (i = SNDRV_MASK_SIZE - 1; i >= 0; i--) { if (mask->bits[i]) return __fls(mask->bits[i]) + (i << 5); } return 0; } static inline void snd_mask_set(struct snd_mask *mask, unsigned int val) { mask->bits[MASK_OFS(val)] |= MASK_BIT(val); } /* Most of drivers need only this one */ static inline void snd_mask_set_format(struct snd_mask *mask, snd_pcm_format_t format) { snd_mask_set(mask, (__force unsigned int)format); } static inline void snd_mask_reset(struct snd_mask *mask, unsigned int val) { mask->bits[MASK_OFS(val)] &= ~MASK_BIT(val); } static inline void snd_mask_set_range(struct snd_mask *mask, unsigned int from, unsigned int to) { unsigned int i; for (i = from; i <= to; i++) 
mask->bits[MASK_OFS(i)] |= MASK_BIT(i); } static inline void snd_mask_reset_range(struct snd_mask *mask, unsigned int from, unsigned int to) { unsigned int i; for (i = from; i <= to; i++) mask->bits[MASK_OFS(i)] &= ~MASK_BIT(i); } static inline void snd_mask_leave(struct snd_mask *mask, unsigned int val) { unsigned int v; v = mask->bits[MASK_OFS(val)] & MASK_BIT(val); snd_mask_none(mask); mask->bits[MASK_OFS(val)] = v; } static inline void snd_mask_intersect(struct snd_mask *mask, const struct snd_mask *v) { int i; for (i = 0; i < SNDRV_MASK_SIZE; i++) mask->bits[i] &= v->bits[i]; } static inline int snd_mask_eq(const struct snd_mask *mask, const struct snd_mask *v) { return ! memcmp(mask, v, SNDRV_MASK_SIZE * sizeof(u_int32_t)); } static inline void snd_mask_copy(struct snd_mask *mask, const struct snd_mask *v) { *mask = *v; } static inline int snd_mask_test(const struct snd_mask *mask, unsigned int val) { return mask->bits[MASK_OFS(val)] & MASK_BIT(val); } /* Most of drivers need only this one */ static inline int snd_mask_test_format(const struct snd_mask *mask, snd_pcm_format_t format) { return snd_mask_test(mask, (__force unsigned int)format); } static inline int snd_mask_single(const struct snd_mask *mask) { int i, c = 0; for (i = 0; i < SNDRV_MASK_SIZE; i++) { if (! mask->bits[i]) continue; if (mask->bits[i] & (mask->bits[i] - 1)) return 0; if (c) return 0; c++; } return 1; } static inline int snd_mask_refine(struct snd_mask *mask, const struct snd_mask *v) { struct snd_mask old; snd_mask_copy(&old, mask); snd_mask_intersect(mask, v); if (snd_mask_empty(mask)) return -EINVAL; return !snd_mask_eq(mask, &old); } static inline int snd_mask_refine_first(struct snd_mask *mask) { if (snd_mask_single(mask)) return 0; snd_mask_leave(mask, snd_mask_min(mask)); return 1; } static inline int snd_mask_refine_last(struct snd_mask *mask) { if (snd_mask_single(mask)) return 0; snd_mask_leave(mask, snd_mask_max(mask)); return 1; } static inline int snd_mask_refine_min(struct snd_mask *mask, unsigned int val) { if (snd_mask_min(mask) >= val) return 0; snd_mask_reset_range(mask, 0, val - 1); if (snd_mask_empty(mask)) return -EINVAL; return 1; } static inline int snd_mask_refine_max(struct snd_mask *mask, unsigned int val) { if (snd_mask_max(mask) <= val) return 0; snd_mask_reset_range(mask, val + 1, SNDRV_MASK_BITS); if (snd_mask_empty(mask)) return -EINVAL; return 1; } static inline int snd_mask_refine_set(struct snd_mask *mask, unsigned int val) { int changed; changed = !snd_mask_single(mask); snd_mask_leave(mask, val); if (snd_mask_empty(mask)) return -EINVAL; return changed; } static inline int snd_mask_value(const struct snd_mask *mask) { return snd_mask_min(mask); } static inline void snd_interval_any(struct snd_interval *i) { i->min = 0; i->openmin = 0; i->max = UINT_MAX; i->openmax = 0; i->integer = 0; i->empty = 0; } static inline void snd_interval_none(struct snd_interval *i) { i->empty = 1; } static inline int snd_interval_checkempty(const struct snd_interval *i) { return (i->min > i->max || (i->min == i->max && (i->openmin || i->openmax))); } static inline int snd_interval_empty(const struct snd_interval *i) { return i->empty; } static inline int snd_interval_single(const struct snd_interval *i) { return (i->min == i->max || (i->min + 1 == i->max && (i->openmin || i->openmax))); } static inline int snd_interval_value(const struct snd_interval *i) { if (i->openmin && !i->openmax) return i->max; return i->min; } static inline int snd_interval_min(const struct snd_interval *i) { return 
i->min; } static inline int snd_interval_max(const struct snd_interval *i) { unsigned int v; v = i->max; if (i->openmax) v--; return v; } static inline int snd_interval_test(const struct snd_interval *i, unsigned int val) { return !((i->min > val || (i->min == val && i->openmin) || i->max < val || (i->max == val && i->openmax))); } static inline void snd_interval_copy(struct snd_interval *d, const struct snd_interval *s) { *d = *s; } static inline int snd_interval_setinteger(struct snd_interval *i) { if (i->integer) return 0; if (i->openmin && i->openmax && i->min == i->max) return -EINVAL; i->integer = 1; return 1; } static inline int snd_interval_eq(const struct snd_interval *i1, const struct snd_interval *i2) { if (i1->empty) return i2->empty; if (i2->empty) return i1->empty; return i1->min == i2->min && i1->openmin == i2->openmin && i1->max == i2->max && i1->openmax == i2->openmax; } /** * params_access - get the access type from the hw params * @p: hw params */ static inline snd_pcm_access_t params_access(const struct snd_pcm_hw_params *p) { return (__force snd_pcm_access_t)snd_mask_min(hw_param_mask_c(p, SNDRV_PCM_HW_PARAM_ACCESS)); } /** * params_format - get the sample format from the hw params * @p: hw params */ static inline snd_pcm_format_t params_format(const struct snd_pcm_hw_params *p) { return (__force snd_pcm_format_t)snd_mask_min(hw_param_mask_c(p, SNDRV_PCM_HW_PARAM_FORMAT)); } /** * params_subformat - get the sample subformat from the hw params * @p: hw params */ static inline snd_pcm_subformat_t params_subformat(const struct snd_pcm_hw_params *p) { return (__force snd_pcm_subformat_t)snd_mask_min(hw_param_mask_c(p, SNDRV_PCM_HW_PARAM_SUBFORMAT)); } /** * params_period_bytes - get the period size (in bytes) from the hw params * @p: hw params */ static inline unsigned int params_period_bytes(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_BYTES)->min; } /** * params_width - get the number of bits of the sample format from the hw params * @p: hw params * * This function returns the number of bits per sample that the selected sample * format of the hw params has. */ static inline int params_width(const struct snd_pcm_hw_params *p) { return snd_pcm_format_width(params_format(p)); } /* * params_physical_width - get the storage size of the sample format from the hw params * @p: hw params * * This functions returns the number of bits per sample that the selected sample * format of the hw params takes up in memory. This will be equal or larger than * params_width(). */ static inline int params_physical_width(const struct snd_pcm_hw_params *p) { return snd_pcm_format_physical_width(params_format(p)); } int snd_pcm_hw_params_bits(const struct snd_pcm_hw_params *p); static inline void params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt) { snd_mask_set_format(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT), fmt); } #endif /* __SOUND_PCM_PARAMS_H */
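To illustrate the params_*() accessors defined above, here is a minimal sketch (not part of the header) of a driver's hw_params callback deriving the sample layout; my_set_sample_size() is a hypothetical driver function.

/*
 * Hedged sketch: reading format geometry from snd_pcm_hw_params.
 * my_set_sample_size() is hypothetical.
 */
#include <sound/pcm.h>
#include <sound/pcm_params.h>

static int my_pcm_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params)
{
	snd_pcm_format_t fmt = params_format(params);
	int bits = params_width(params);		/* significant bits per sample */
	int phys = params_physical_width(params);	/* bits occupied in memory */

	/* e.g. S24_LE: bits == 24, phys == 32 */
	return my_set_sample_size(substream, fmt, bits, phys);
}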
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2019 Google, Inc.
 *
 * Authors:
 * Sean Paul <seanpaul@chromium.org>
 */

#include <linux/average.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>

/**
 * DOC: overview
 *
 * This helper library provides an easy way for drivers to leverage the atomic
 * framework to implement panel self refresh (SR) support. Drivers are
 * responsible for initializing and cleaning up the SR helpers on load/unload
 * (see &drm_self_refresh_helper_init/&drm_self_refresh_helper_cleanup).
 * The connector is responsible for setting
 * &drm_connector_state.self_refresh_aware to true at runtime if it is SR-aware
 * (meaning it knows how to initiate self refresh on the panel).
 *
 * Once a crtc has enabled SR using &drm_self_refresh_helper_init, the
 * helpers will monitor activity and call back into the driver to enable/disable
 * SR as appropriate. The best way to think about this is that it's a DPMS
 * on/off request with &drm_crtc_state.self_refresh_active set in crtc state
 * that tells you to disable/enable SR on the panel instead of power-cycling it.
 *
 * During SR, drivers may choose to fully disable their crtc/encoder/bridge
 * hardware (in which case no driver changes are necessary), or they can inspect
 * &drm_crtc_state.self_refresh_active if they want to enter low power mode
 * without full disable (in case full disable/enable is too slow).
 *
 * SR will be deactivated if there are any atomic updates affecting the
 * pipe that is in SR mode. If a crtc is driving multiple connectors, all
 * connectors must be SR aware and all will enter/exit SR mode at the same time.
 *
 * If the crtc and connector are SR aware, but the panel connected does not
 * support it (or is otherwise unable to enter SR), the driver should fail
 * atomic_check when &drm_crtc_state.self_refresh_active is true.
*/ #define SELF_REFRESH_AVG_SEED_MS 200 DECLARE_EWMA(psr_time, 4, 4) struct drm_self_refresh_data { struct drm_crtc *crtc; struct delayed_work entry_work; struct mutex avg_mutex; struct ewma_psr_time entry_avg_ms; struct ewma_psr_time exit_avg_ms; }; static void drm_self_refresh_helper_entry_work(struct work_struct *work) { struct drm_self_refresh_data *sr_data = container_of( to_delayed_work(work), struct drm_self_refresh_data, entry_work); struct drm_crtc *crtc = sr_data->crtc; struct drm_device *dev = crtc->dev; struct drm_modeset_acquire_ctx ctx; struct drm_atomic_state *state; struct drm_connector *conn; struct drm_connector_state *conn_state; struct drm_crtc_state *crtc_state; int i, ret = 0; drm_modeset_acquire_init(&ctx, 0); state = drm_atomic_state_alloc(dev); if (!state) { ret = -ENOMEM; goto out_drop_locks; } retry: state->acquire_ctx = &ctx; crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); goto out; } if (!crtc_state->enable) goto out; ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) goto out; for_each_new_connector_in_state(state, conn, conn_state, i) { if (!conn_state->self_refresh_aware) goto out; } crtc_state->active = false; crtc_state->self_refresh_active = true; ret = drm_atomic_commit(state); if (ret) goto out; out: if (ret == -EDEADLK) { drm_atomic_state_clear(state); ret = drm_modeset_backoff(&ctx); if (!ret) goto retry; } drm_atomic_state_put(state); out_drop_locks: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } /** * drm_self_refresh_helper_update_avg_times - Updates a crtc's SR time averages * @state: the state which has just been applied to hardware * @commit_time_ms: the amount of time in ms that this commit took to complete * @new_self_refresh_mask: bitmask of crtc's that have self_refresh_active in * new state * * Called after &drm_mode_config_funcs.atomic_commit_tail, this function will * update the average entry/exit self refresh times on self refresh transitions. * These averages will be used when calculating how long to delay before * entering self refresh mode after activity. */ void drm_self_refresh_helper_update_avg_times(struct drm_atomic_state *state, unsigned int commit_time_ms, unsigned int new_self_refresh_mask) { struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int i; for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { bool new_self_refresh_active = new_self_refresh_mask & BIT(i); struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; struct ewma_psr_time *time; if (old_crtc_state->self_refresh_active == new_self_refresh_active) continue; if (new_self_refresh_active) time = &sr_data->entry_avg_ms; else time = &sr_data->exit_avg_ms; mutex_lock(&sr_data->avg_mutex); ewma_psr_time_add(time, commit_time_ms); mutex_unlock(&sr_data->avg_mutex); } } EXPORT_SYMBOL(drm_self_refresh_helper_update_avg_times); /** * drm_self_refresh_helper_alter_state - Alters the atomic state for SR exit * @state: the state currently being checked * * Called at the end of atomic check. This function checks the state for flags * incompatible with self refresh exit and changes them. This is a bit * disingenuous since userspace is expecting one thing and we're giving it * another. However in order to keep self refresh entirely hidden from * userspace, this is required. * * At the end, we queue up the self refresh entry work so we can enter PSR after * the desired delay. 
*/ void drm_self_refresh_helper_alter_state(struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int i; if (state->async_update || !state->allow_modeset) { for_each_old_crtc_in_state(state, crtc, crtc_state, i) { if (crtc_state->self_refresh_active) { state->async_update = false; state->allow_modeset = true; break; } } } for_each_new_crtc_in_state(state, crtc, crtc_state, i) { struct drm_self_refresh_data *sr_data; unsigned int delay; /* Don't trigger the entry timer when we're already in SR */ if (crtc_state->self_refresh_active) continue; sr_data = crtc->self_refresh_data; if (!sr_data) continue; mutex_lock(&sr_data->avg_mutex); delay = (ewma_psr_time_read(&sr_data->entry_avg_ms) + ewma_psr_time_read(&sr_data->exit_avg_ms)) * 2; mutex_unlock(&sr_data->avg_mutex); mod_delayed_work(system_wq, &sr_data->entry_work, msecs_to_jiffies(delay)); } } EXPORT_SYMBOL(drm_self_refresh_helper_alter_state); /** * drm_self_refresh_helper_init - Initializes self refresh helpers for a crtc * @crtc: the crtc which supports self refresh supported displays * * Returns zero if successful or -errno on failure */ int drm_self_refresh_helper_init(struct drm_crtc *crtc) { struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; /* Helper is already initialized */ if (WARN_ON(sr_data)) return -EINVAL; sr_data = kzalloc(sizeof(*sr_data), GFP_KERNEL); if (!sr_data) return -ENOMEM; INIT_DELAYED_WORK(&sr_data->entry_work, drm_self_refresh_helper_entry_work); sr_data->crtc = crtc; mutex_init(&sr_data->avg_mutex); ewma_psr_time_init(&sr_data->entry_avg_ms); ewma_psr_time_init(&sr_data->exit_avg_ms); /* * Seed the averages so they're non-zero (and sufficiently large * for even poorly performing panels). As time goes on, this will be * averaged out and the values will trend to their true value. */ ewma_psr_time_add(&sr_data->entry_avg_ms, SELF_REFRESH_AVG_SEED_MS); ewma_psr_time_add(&sr_data->exit_avg_ms, SELF_REFRESH_AVG_SEED_MS); crtc->self_refresh_data = sr_data; return 0; } EXPORT_SYMBOL(drm_self_refresh_helper_init); /** * drm_self_refresh_helper_cleanup - Cleans up self refresh helpers for a crtc * @crtc: the crtc to cleanup */ void drm_self_refresh_helper_cleanup(struct drm_crtc *crtc) { struct drm_self_refresh_data *sr_data = crtc->self_refresh_data; /* Helper is already uninitialized */ if (!sr_data) return; crtc->self_refresh_data = NULL; cancel_delayed_work_sync(&sr_data->entry_work); kfree(sr_data); } EXPORT_SYMBOL(drm_self_refresh_helper_cleanup);
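A hedged sketch of how a driver typically wires these helpers up: call drm_self_refresh_helper_init() when the CRTC is created, and check self_refresh_active in the disable path so a self-refresh entry is not treated as a full power-down. All my_* names below are hypothetical driver helpers, not part of the library above.

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_print.h>
#include <drm/drm_self_refresh_helper.h>

/* Hedged sketch; my_panel_* / my_crtc_* are hypothetical. */
static void my_crtc_atomic_disable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	/* Set by the helper's entry work (see drm_self_refresh_helper_entry_work) */
	if (crtc->state->self_refresh_active) {
		my_panel_enter_self_refresh(crtc);	/* low power, frame retained */
		return;
	}

	my_crtc_full_disable(crtc);			/* normal power-down path */
}

static int my_crtc_create(struct drm_device *dev, struct drm_crtc *crtc)
{
	int ret;

	/* ... drm_crtc_init_with_planes(), helper funcs, etc. ... */

	ret = drm_self_refresh_helper_init(crtc);
	if (ret)
		drm_err(dev, "self refresh helper init failed: %d\n", ret);
	return ret;
}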
// SPDX-License-Identifier: GPL-2.0
/*
 * chaoskey - driver for ChaosKey device from Altus Metrum.
 *
 * This device provides true random numbers using a noise source based
 * on a reverse-biased p-n junction in avalanche breakdown. More
 * details can be found at http://chaoskey.org
 *
 * The driver connects to the kernel hardware RNG interface to provide
 * entropy for /dev/random and other kernel activities. It also offers
 * a separate /dev/ entry to allow for direct access to the random
 * bit stream.
 *
 * Copyright © 2015 Keith Packard <keithp@keithp.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/hw_random.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

static struct usb_driver chaoskey_driver;
static struct usb_class_driver chaoskey_class;
static int chaoskey_rng_read(struct hwrng *rng, void *data,
			     size_t max, bool wait);
static DEFINE_MUTEX(chaoskey_list_lock);

#define usb_dbg(usb_if, format, arg...) \
	dev_dbg(&(usb_if)->dev, format, ## arg)

#define usb_err(usb_if, format, arg...)
\ dev_err(&(usb_if)->dev, format, ## arg) /* Version Information */ #define DRIVER_AUTHOR "Keith Packard, keithp@keithp.com" #define DRIVER_DESC "Altus Metrum ChaosKey driver" #define DRIVER_SHORT "chaoskey" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define CHAOSKEY_VENDOR_ID 0x1d50 /* OpenMoko */ #define CHAOSKEY_PRODUCT_ID 0x60c6 /* ChaosKey */ #define ALEA_VENDOR_ID 0x12d8 /* Araneus */ #define ALEA_PRODUCT_ID 0x0001 /* Alea I */ #define CHAOSKEY_BUF_LEN 64 /* max size of USB full speed packet */ #define NAK_TIMEOUT (HZ) /* normal stall/wait timeout */ #define ALEA_FIRST_TIMEOUT (HZ*3) /* first stall/wait timeout for Alea */ #ifdef CONFIG_USB_DYNAMIC_MINORS #define USB_CHAOSKEY_MINOR_BASE 0 #else /* IOWARRIOR_MINOR_BASE + 16, not official yet */ #define USB_CHAOSKEY_MINOR_BASE 224 #endif static const struct usb_device_id chaoskey_table[] = { { USB_DEVICE(CHAOSKEY_VENDOR_ID, CHAOSKEY_PRODUCT_ID) }, { USB_DEVICE(ALEA_VENDOR_ID, ALEA_PRODUCT_ID) }, { }, }; MODULE_DEVICE_TABLE(usb, chaoskey_table); static void chaos_read_callback(struct urb *urb); /* Driver-local specific stuff */ struct chaoskey { struct usb_interface *interface; char in_ep; struct mutex lock; struct mutex rng_lock; int open; /* open count */ bool present; /* device not disconnected */ bool reading; /* ongoing IO */ bool reads_started; /* track first read for Alea */ int size; /* size of buf */ int valid; /* bytes of buf read */ int used; /* bytes of buf consumed */ char *name; /* product + serial */ struct hwrng hwrng; /* Embedded struct for hwrng */ int hwrng_registered; /* registered with hwrng API */ wait_queue_head_t wait_q; /* for timeouts */ struct urb *urb; /* for performing IO */ char *buf; }; static void chaoskey_free(struct chaoskey *dev) { if (dev) { usb_dbg(dev->interface, "free"); usb_free_urb(dev->urb); kfree(dev->name); kfree(dev->buf); usb_put_intf(dev->interface); kfree(dev); } } static int chaoskey_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct usb_host_interface *altsetting = interface->cur_altsetting; struct usb_endpoint_descriptor *epd; int in_ep; struct chaoskey *dev; int result = -ENOMEM; int size; int res; usb_dbg(interface, "probe %s-%s", udev->product, udev->serial); /* Find the first bulk IN endpoint and its packet size */ res = usb_find_bulk_in_endpoint(altsetting, &epd); if (res) { usb_dbg(interface, "no IN endpoint found"); return res; } in_ep = usb_endpoint_num(epd); size = usb_endpoint_maxp(epd); /* Validate endpoint and size */ if (size <= 0) { usb_dbg(interface, "invalid size (%d)", size); return -ENODEV; } if (size > CHAOSKEY_BUF_LEN) { usb_dbg(interface, "size reduced from %d to %d\n", size, CHAOSKEY_BUF_LEN); size = CHAOSKEY_BUF_LEN; } /* Looks good, allocate and initialize */ dev = kzalloc(sizeof(struct chaoskey), GFP_KERNEL); if (dev == NULL) goto out; dev->interface = usb_get_intf(interface); dev->buf = kmalloc(size, GFP_KERNEL); if (dev->buf == NULL) goto out; dev->urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb) goto out; usb_fill_bulk_urb(dev->urb, udev, usb_rcvbulkpipe(udev, in_ep), dev->buf, size, chaos_read_callback, dev); /* Construct a name using the product and serial values. 
Each * device needs a unique name for the hwrng code */ if (udev->product && udev->serial) { dev->name = kasprintf(GFP_KERNEL, "%s-%s", udev->product, udev->serial); if (dev->name == NULL) goto out; } dev->in_ep = in_ep; if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID) dev->reads_started = true; dev->size = size; dev->present = true; init_waitqueue_head(&dev->wait_q); mutex_init(&dev->lock); mutex_init(&dev->rng_lock); usb_set_intfdata(interface, dev); result = usb_register_dev(interface, &chaoskey_class); if (result) { usb_err(interface, "Unable to allocate minor number."); goto out; } dev->hwrng.name = dev->name ? dev->name : chaoskey_driver.name; dev->hwrng.read = chaoskey_rng_read; dev->hwrng_registered = (hwrng_register(&dev->hwrng) == 0); if (!dev->hwrng_registered) usb_err(interface, "Unable to register with hwrng"); usb_enable_autosuspend(udev); usb_dbg(interface, "chaoskey probe success, size %d", dev->size); return 0; out: usb_set_intfdata(interface, NULL); chaoskey_free(dev); return result; } static void chaoskey_disconnect(struct usb_interface *interface) { struct chaoskey *dev; usb_dbg(interface, "disconnect"); dev = usb_get_intfdata(interface); if (!dev) { usb_dbg(interface, "disconnect failed - no dev"); return; } if (dev->hwrng_registered) hwrng_unregister(&dev->hwrng); usb_deregister_dev(interface, &chaoskey_class); usb_set_intfdata(interface, NULL); mutex_lock(&chaoskey_list_lock); mutex_lock(&dev->lock); dev->present = false; usb_poison_urb(dev->urb); if (!dev->open) { mutex_unlock(&dev->lock); chaoskey_free(dev); } else mutex_unlock(&dev->lock); mutex_unlock(&chaoskey_list_lock); usb_dbg(interface, "disconnect done"); } static int chaoskey_open(struct inode *inode, struct file *file) { struct chaoskey *dev; struct usb_interface *interface; int rv = 0; /* get the interface from minor number and driver information */ interface = usb_find_interface(&chaoskey_driver, iminor(inode)); if (!interface) return -ENODEV; usb_dbg(interface, "open"); dev = usb_get_intfdata(interface); if (!dev) { usb_dbg(interface, "open (dev)"); return -ENODEV; } file->private_data = dev; mutex_lock(&chaoskey_list_lock); mutex_lock(&dev->lock); if (dev->present) ++dev->open; else rv = -ENODEV; mutex_unlock(&dev->lock); mutex_unlock(&chaoskey_list_lock); return rv; } static int chaoskey_release(struct inode *inode, struct file *file) { struct chaoskey *dev = file->private_data; struct usb_interface *interface; int rv = 0; if (dev == NULL) return -ENODEV; interface = dev->interface; usb_dbg(interface, "release"); mutex_lock(&chaoskey_list_lock); mutex_lock(&dev->lock); usb_dbg(interface, "open count at release is %d", dev->open); if (dev->open <= 0) { usb_dbg(interface, "invalid open count (%d)", dev->open); rv = -ENODEV; goto bail; } --dev->open; if (!dev->present) { if (dev->open == 0) { mutex_unlock(&dev->lock); chaoskey_free(dev); goto destruction; } } bail: mutex_unlock(&dev->lock); destruction: mutex_unlock(&chaoskey_list_lock); usb_dbg(interface, "release success"); return rv; } static void chaos_read_callback(struct urb *urb) { struct chaoskey *dev = urb->context; int status = urb->status; usb_dbg(dev->interface, "callback status (%d)", status); if (status == 0) dev->valid = urb->actual_length; else dev->valid = 0; dev->used = 0; /* must be seen first before validity is announced */ smp_wmb(); dev->reading = false; wake_up(&dev->wait_q); } /* Fill the buffer. 
Called with dev->lock held */ static int _chaoskey_fill(struct chaoskey *dev) { DEFINE_WAIT(wait); int result; bool started; usb_dbg(dev->interface, "fill"); /* Return immediately if someone called before the buffer was * empty */ if (dev->valid != dev->used) { usb_dbg(dev->interface, "not empty yet (valid %d used %d)", dev->valid, dev->used); return 0; } /* Bail if the device has been removed */ if (!dev->present) { usb_dbg(dev->interface, "device not present"); return -ENODEV; } /* Make sure the device is awake */ result = usb_autopm_get_interface(dev->interface); if (result) { usb_dbg(dev->interface, "wakeup failed (result %d)", result); return result; } dev->reading = true; result = usb_submit_urb(dev->urb, GFP_KERNEL); if (result < 0) { result = usb_translate_errors(result); dev->reading = false; goto out; } /* The first read on the Alea takes a little under 2 seconds. * Reads after the first read take only a few microseconds * though. Presumably the entropy-generating circuit needs * time to ramp up. So, we wait longer on the first read. */ started = dev->reads_started; dev->reads_started = true; result = wait_event_interruptible_timeout( dev->wait_q, !dev->reading, (started ? NAK_TIMEOUT : ALEA_FIRST_TIMEOUT) ); if (result < 0) { usb_kill_urb(dev->urb); goto out; } if (result == 0) { result = -ETIMEDOUT; usb_kill_urb(dev->urb); } else { result = dev->valid; } out: /* Let the device go back to sleep eventually */ usb_autopm_put_interface(dev->interface); usb_dbg(dev->interface, "read %d bytes", dev->valid); return result; } static ssize_t chaoskey_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct chaoskey *dev; ssize_t read_count = 0; int this_time; int result = 0; unsigned long remain; dev = file->private_data; if (dev == NULL || !dev->present) return -ENODEV; usb_dbg(dev->interface, "read %zu", count); while (count > 0) { /* Grab the rng_lock briefly to ensure that the hwrng interface * gets priority over other user access */ result = mutex_lock_interruptible(&dev->rng_lock); if (result) goto bail; mutex_unlock(&dev->rng_lock); result = mutex_lock_interruptible(&dev->lock); if (result) goto bail; if (dev->valid == dev->used) { result = _chaoskey_fill(dev); if (result < 0) { mutex_unlock(&dev->lock); goto bail; } } this_time = dev->valid - dev->used; if (this_time > count) this_time = count; remain = copy_to_user(buffer, dev->buf + dev->used, this_time); if (remain) { result = -EFAULT; /* Consume the bytes that were copied so we don't leak * data to user space */ dev->used += this_time - remain; mutex_unlock(&dev->lock); goto bail; } count -= this_time; read_count += this_time; buffer += this_time; dev->used += this_time; mutex_unlock(&dev->lock); } bail: if (read_count) { usb_dbg(dev->interface, "read %zu bytes", read_count); return read_count; } usb_dbg(dev->interface, "empty read, result %d", result); if (result == -ETIMEDOUT) result = -EAGAIN; return result; } static int chaoskey_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct chaoskey *dev = container_of(rng, struct chaoskey, hwrng); int this_time; usb_dbg(dev->interface, "rng_read max %zu wait %d", max, wait); if (!dev->present) { usb_dbg(dev->interface, "device not present"); return 0; } /* Hold the rng_lock until we acquire the device lock so that * this operation gets priority over other user access to the * device */ mutex_lock(&dev->rng_lock); mutex_lock(&dev->lock); mutex_unlock(&dev->rng_lock); /* Try to fill the buffer if empty. 
It doesn't actually matter * if _chaoskey_fill works; we'll just return zero bytes as * the buffer will still be empty */ if (dev->valid == dev->used) (void) _chaoskey_fill(dev); this_time = dev->valid - dev->used; if (this_time > max) this_time = max; memcpy(data, dev->buf + dev->used, this_time); dev->used += this_time; mutex_unlock(&dev->lock); usb_dbg(dev->interface, "rng_read this_time %d\n", this_time); return this_time; } #ifdef CONFIG_PM static int chaoskey_suspend(struct usb_interface *interface, pm_message_t message) { usb_dbg(interface, "suspend"); return 0; } static int chaoskey_resume(struct usb_interface *interface) { struct chaoskey *dev; struct usb_device *udev = interface_to_usbdev(interface); usb_dbg(interface, "resume"); dev = usb_get_intfdata(interface); /* * We may have lost power. * In that case the device that needs a long time * for the first requests needs an extended timeout * again */ if (le16_to_cpu(udev->descriptor.idVendor) == ALEA_VENDOR_ID) dev->reads_started = false; return 0; } #else #define chaoskey_suspend NULL #define chaoskey_resume NULL #endif /* file operation pointers */ static const struct file_operations chaoskey_fops = { .owner = THIS_MODULE, .read = chaoskey_read, .open = chaoskey_open, .release = chaoskey_release, .llseek = default_llseek, }; /* class driver information */ static struct usb_class_driver chaoskey_class = { .name = "chaoskey%d", .fops = &chaoskey_fops, .minor_base = USB_CHAOSKEY_MINOR_BASE, }; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver chaoskey_driver = { .name = DRIVER_SHORT, .probe = chaoskey_probe, .disconnect = chaoskey_disconnect, .suspend = chaoskey_suspend, .resume = chaoskey_resume, .reset_resume = chaoskey_resume, .id_table = chaoskey_table, .supports_autosuspend = 1, }; module_usb_driver(chaoskey_driver);
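Besides feeding the hwrng core, the driver exposes a character device through chaoskey_fops for direct access to the raw bit stream. A minimal user-space sketch of reading it follows; the node name /dev/chaoskey0 is an assumption based on the "chaoskey%d" class name and may differ on a given system.

/* Hedged user-space sketch; device node name is an assumption. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[64];	/* one CHAOSKEY_BUF_LEN sized request */
	ssize_t n;
	int fd = open("/dev/chaoskey0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* may block until the URB completes */
	if (n > 0)
		printf("read %zd random bytes\n", n);
	close(fd);
	return n > 0 ? 0 : 1;
}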
2 2 1 1 2 2 2 1 2 2 2 2 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 // SPDX-License-Identifier: GPL-2.0-or-later /* * DVB USB Linux driver for Alcor Micro AU6610 DVB-T USB2.0. * * Copyright (C) 2006 Antti Palosaari <crope@iki.fi> */ #include "au6610.h" #include "zl10353.h" #include "qt1010.h" DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); static int au6610_usb_msg(struct dvb_usb_device *d, u8 operation, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { int ret; u16 index; u8 *usb_buf; /* * allocate enough for all known requests, * read returns 5 and write 6 bytes */ usb_buf = kmalloc(6, GFP_KERNEL); if (!usb_buf) return -ENOMEM; switch (wlen) { case 1: index = wbuf[0] << 8; break; case 2: index = wbuf[0] << 8; index += wbuf[1]; break; default: dev_err(&d->udev->dev, "%s: wlen=%d, aborting\n", KBUILD_MODNAME, wlen); ret = -EINVAL; goto error; } ret = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev, 0), operation, USB_TYPE_VENDOR|USB_DIR_IN, addr << 1, index, usb_buf, 6, AU6610_USB_TIMEOUT); dvb_usb_dbg_usb_control_msg(d->udev, operation, (USB_TYPE_VENDOR|USB_DIR_IN), addr << 1, index, usb_buf, 6); if (ret < 0) goto error; switch (operation) { case AU6610_REQ_I2C_READ: case AU6610_REQ_USB_READ: /* requested value is always 5th byte in buffer */ rbuf[0] = usb_buf[4]; } error: kfree(usb_buf); return ret; } static int au6610_i2c_msg(struct dvb_usb_device *d, u8 addr, u8 *wbuf, u16 wlen, u8 *rbuf, u16 rlen) { u8 request; u8 wo = (rbuf == NULL || rlen == 0); /* write-only */ if (wo) { request = AU6610_REQ_I2C_WRITE; } else { /* rw */ request = AU6610_REQ_I2C_READ; } return au6610_usb_msg(d, request, addr, wbuf, wlen, rbuf, rlen); } /* I2C */ static int au6610_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); int i; if (num > 2) return -EINVAL; if (mutex_lock_interruptible(&d->i2c_mutex) < 0) return -EAGAIN; for (i = 0; i < num; i++) { /* write/read request */ if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) { if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf, msg[i].len, msg[i+1].buf, msg[i+1].len) < 0) break; i++; } else if (au6610_i2c_msg(d, msg[i].addr, msg[i].buf, msg[i].len, NULL, 0) < 0) break; } mutex_unlock(&d->i2c_mutex); return i; } static u32 au6610_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm au6610_i2c_algo = { .master_xfer = au6610_i2c_xfer, .functionality = au6610_i2c_func, }; /* Callbacks for DVB USB */ static struct zl10353_config au6610_zl10353_config = { .demod_address = 0x0f, .no_tuner = 1, .parallel_ts = 1, }; static int au6610_zl10353_frontend_attach(struct dvb_usb_adapter *adap) { adap->fe[0] = dvb_attach(zl10353_attach, &au6610_zl10353_config, &adap_to_d(adap)->i2c_adap); if (adap->fe[0] == NULL) return -ENODEV; return 0; } static struct qt1010_config au6610_qt1010_config = { .i2c_address = 0x62 }; static int 
au6610_qt1010_tuner_attach(struct dvb_usb_adapter *adap) { return dvb_attach(qt1010_attach, adap->fe[0], &adap_to_d(adap)->i2c_adap, &au6610_qt1010_config) == NULL ? -ENODEV : 0; } static int au6610_init(struct dvb_usb_device *d) { /* TODO: this functionality belongs likely to the streaming control */ /* bInterfaceNumber 0, bAlternateSetting 5 */ return usb_set_interface(d->udev, 0, 5); } static struct dvb_usb_device_properties au6610_props = { .driver_name = KBUILD_MODNAME, .owner = THIS_MODULE, .adapter_nr = adapter_nr, .i2c_algo = &au6610_i2c_algo, .frontend_attach = au6610_zl10353_frontend_attach, .tuner_attach = au6610_qt1010_tuner_attach, .init = au6610_init, .num_adapters = 1, .adapter = { { .stream = DVB_USB_STREAM_ISOC(0x82, 5, 40, 942, 1), }, }, }; static const struct usb_device_id au6610_id_table[] = { { DVB_USB_DEVICE(USB_VID_ALCOR_MICRO, USB_PID_SIGMATEK_DVB_110, &au6610_props, "Sigmatek DVB-110", NULL) }, { } }; MODULE_DEVICE_TABLE(usb, au6610_id_table); static struct usb_driver au6610_driver = { .name = KBUILD_MODNAME, .id_table = au6610_id_table, .probe = dvb_usbv2_probe, .disconnect = dvb_usbv2_disconnect, .suspend = dvb_usbv2_suspend, .resume = dvb_usbv2_resume, .reset_resume = dvb_usbv2_reset_resume, .no_dynamic_id = 1, .soft_unbind = 1, }; module_usb_driver(au6610_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("Driver for Alcor Micro AU6610 DVB-T USB2.0"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL");
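au6610_i2c_xfer() above only understands two message shapes: a lone write, or a write immediately followed by a read, which it folds into a single AU6610_REQ_I2C_READ control transfer that returns one byte. A hedged sketch of the two-message register read this corresponds to; my_read_reg and the use of a one-byte payload are illustrative only.

/* Hedged sketch: the write-then-read pair the adapter's master_xfer expects. */
#include <linux/i2c.h>

static int my_read_reg(struct i2c_adapter *adap, u8 addr, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{ .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
	};

	/* au6610_i2c_xfer() consumes both messages in one USB control request */
	return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
}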
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
* * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/random.h> #include <linux/siphash.h> #include <net/ip.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/inet_ecn.h> /* * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level) * This implementation uses L = 8 and N = 16 * This permits us to split one 32bit hash (provided per packet by rxhash or * external classifier) into 8 subhashes of 4 bits. */ #define SFB_BUCKET_SHIFT 4 #define SFB_NUMBUCKETS (1 << SFB_BUCKET_SHIFT) /* N bins per Level */ #define SFB_BUCKET_MASK (SFB_NUMBUCKETS - 1) #define SFB_LEVELS (32 / SFB_BUCKET_SHIFT) /* L */ /* SFB algo uses a virtual queue, named "bin" */ struct sfb_bucket { u16 qlen; /* length of virtual queue */ u16 p_mark; /* marking probability */ }; /* We use a double buffering right before hash change * (Section 4.4 of SFB reference : moving hash functions) */ struct sfb_bins { siphash_key_t perturbation; /* siphash key */ struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS]; }; struct sfb_sched_data { struct Qdisc *qdisc; struct tcf_proto __rcu *filter_list; struct tcf_block *block; unsigned long rehash_interval; unsigned long warmup_time; /* double buffering warmup time in jiffies */ u32 max; u32 bin_size; /* maximum queue length per bin */ u32 increment; /* d1 */ u32 decrement; /* d2 */ u32 limit; /* HARD maximal queue length */ u32 penalty_rate; u32 penalty_burst; u32 tokens_avail; unsigned long rehash_time; unsigned long token_time; u8 slot; /* current active bins (0 or 1) */ bool double_buffering; struct sfb_bins bins[2]; struct { u32 earlydrop; u32 penaltydrop; u32 bucketdrop; u32 queuedrop; u32 childdrop; /* drops in child qdisc */ u32 marked; /* ECN mark */ } stats; }; /* * Each queued skb might be hashed on one or two bins * We store in skb_cb the two hash values. * (A zero value means double buffering was not used) */ struct sfb_skb_cb { u32 hashes[2]; }; static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb) { qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb)); return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data; } /* * If using 'internal' SFB flow classifier, hash comes from skb rxhash * If using external classifier, hash comes from the classid. */ static u32 sfb_hash(const struct sk_buff *skb, u32 slot) { return sfb_skb_cb(skb)->hashes[slot]; } /* Probabilities are coded as Q0.16 fixed-point values, * with 0xFFFF representing 65535/65536 (almost 1.0) * Addition and subtraction are saturating in [0, 65535] */ static u32 prob_plus(u32 p1, u32 p2) { u32 res = p1 + p2; return min_t(u32, res, SFB_MAX_PROB); } static u32 prob_minus(u32 p1, u32 p2) { return p1 > p2 ? 
p1 - p2 : 0; } static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) { int i; struct sfb_bucket *b = &q->bins[slot].bins[0][0]; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; sfbhash >>= SFB_BUCKET_SHIFT; if (b[hash].qlen < 0xFFFF) b[hash].qlen++; b += SFB_NUMBUCKETS; /* next level */ } } static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q) { u32 sfbhash; sfbhash = cb->hashes[0]; if (sfbhash) increment_one_qlen(sfbhash, 0, q); sfbhash = cb->hashes[1]; if (sfbhash) increment_one_qlen(sfbhash, 1, q); } static void decrement_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) { int i; struct sfb_bucket *b = &q->bins[slot].bins[0][0]; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; sfbhash >>= SFB_BUCKET_SHIFT; if (b[hash].qlen > 0) b[hash].qlen--; b += SFB_NUMBUCKETS; /* next level */ } } static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) { u32 sfbhash; sfbhash = sfb_hash(skb, 0); if (sfbhash) decrement_one_qlen(sfbhash, 0, q); sfbhash = sfb_hash(skb, 1); if (sfbhash) decrement_one_qlen(sfbhash, 1, q); } static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q) { b->p_mark = prob_minus(b->p_mark, q->decrement); } static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q) { b->p_mark = prob_plus(b->p_mark, q->increment); } static void sfb_zero_all_buckets(struct sfb_sched_data *q) { memset(&q->bins, 0, sizeof(q->bins)); } /* * compute max qlen, max p_mark, and avg p_mark */ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q) { int i; u32 qlen = 0, prob = 0, totalpm = 0; const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0]; for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) { if (qlen < b->qlen) qlen = b->qlen; totalpm += b->p_mark; if (prob < b->p_mark) prob = b->p_mark; b++; } *prob_r = prob; *avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS); return qlen; } static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q) { get_random_bytes(&q->bins[slot].perturbation, sizeof(q->bins[slot].perturbation)); } static void sfb_swap_slot(struct sfb_sched_data *q) { sfb_init_perturbation(q->slot, q); q->slot ^= 1; q->double_buffering = false; } /* Non elastic flows are allowed to use part of the bandwidth, expressed * in "penalty_rate" packets per second, with "penalty_burst" burst */ static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q) { if (q->penalty_rate == 0 || q->penalty_burst == 0) return true; if (q->tokens_avail < 1) { unsigned long age = min(10UL * HZ, jiffies - q->token_time); q->tokens_avail = (age * q->penalty_rate) / HZ; if (q->tokens_avail > q->penalty_burst) q->tokens_avail = q->penalty_burst; q->token_time = jiffies; if (q->tokens_avail < 1) return true; } q->tokens_avail--; return false; } static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl, int *qerr, u32 *salt) { struct tcf_result res; int result; result = tcf_classify(skb, NULL, fl, &res, false); if (result >= 0) { #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: case TC_ACT_TRAP: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; fallthrough; case TC_ACT_SHOT: return false; } #endif *salt = TC_H_MIN(res.classid); return true; } return false; } static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { struct sfb_sched_data *q = qdisc_priv(sch); unsigned int len = qdisc_pkt_len(skb); struct Qdisc *child = q->qdisc; 
struct tcf_proto *fl; struct sfb_skb_cb cb; int i; u32 p_min = ~0; u32 minqlen = ~0; u32 r, sfbhash; u32 slot = q->slot; int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; if (unlikely(sch->q.qlen >= q->limit)) { qdisc_qstats_overlimit(sch); q->stats.queuedrop++; goto drop; } if (q->rehash_interval > 0) { unsigned long limit = q->rehash_time + q->rehash_interval; if (unlikely(time_after(jiffies, limit))) { sfb_swap_slot(q); q->rehash_time = jiffies; } else if (unlikely(!q->double_buffering && q->warmup_time > 0 && time_after(jiffies, limit - q->warmup_time))) { q->double_buffering = true; } } fl = rcu_dereference_bh(q->filter_list); if (fl) { u32 salt; /* If using external classifiers, get result and record it. */ if (!sfb_classify(skb, fl, &ret, &salt)) goto other_drop; sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation); } else { sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation); } if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; sfbhash >>= SFB_BUCKET_SHIFT; if (b->qlen == 0) decrement_prob(b, q); else if (b->qlen >= q->bin_size) increment_prob(b, q); if (minqlen > b->qlen) minqlen = b->qlen; if (p_min > b->p_mark) p_min = b->p_mark; } slot ^= 1; sfb_skb_cb(skb)->hashes[slot] = 0; if (unlikely(minqlen >= q->max)) { qdisc_qstats_overlimit(sch); q->stats.bucketdrop++; goto drop; } if (unlikely(p_min >= SFB_MAX_PROB)) { /* Inelastic flow */ if (q->double_buffering) { sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation); if (!sfbhash) sfbhash = 1; sfb_skb_cb(skb)->hashes[slot] = sfbhash; for (i = 0; i < SFB_LEVELS; i++) { u32 hash = sfbhash & SFB_BUCKET_MASK; struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; sfbhash >>= SFB_BUCKET_SHIFT; if (b->qlen == 0) decrement_prob(b, q); else if (b->qlen >= q->bin_size) increment_prob(b, q); } } if (sfb_rate_limit(skb, q)) { qdisc_qstats_overlimit(sch); q->stats.penaltydrop++; goto drop; } goto enqueue; } r = get_random_u16() & SFB_MAX_PROB; if (unlikely(r < p_min)) { if (unlikely(p_min > SFB_MAX_PROB / 2)) { /* If we're marking that many packets, then either * this flow is unresponsive, or we're badly congested. * In either case, we want to start dropping packets. */ if (r < (p_min - SFB_MAX_PROB / 2) * 2) { q->stats.earlydrop++; goto drop; } } if (INET_ECN_set_ce(skb)) { q->stats.marked++; } else { q->stats.earlydrop++; goto drop; } } enqueue: memcpy(&cb, sfb_skb_cb(skb), sizeof(cb)); ret = qdisc_enqueue(skb, child, to_free); if (likely(ret == NET_XMIT_SUCCESS)) { sch->qstats.backlog += len; sch->q.qlen++; increment_qlen(&cb, q); } else if (net_xmit_drop_count(ret)) { q->stats.childdrop++; qdisc_qstats_drop(sch); } return ret; drop: qdisc_drop(skb, sch, to_free); return NET_XMIT_CN; other_drop: if (ret & __NET_XMIT_BYPASS) qdisc_qstats_drop(sch); kfree_skb(skb); return ret; } static struct sk_buff *sfb_dequeue(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; struct sk_buff *skb; skb = child->dequeue(q->qdisc); if (skb) { qdisc_bstats_update(sch, skb); qdisc_qstats_backlog_dec(sch, skb); sch->q.qlen--; decrement_qlen(skb, q); } return skb; } static struct sk_buff *sfb_peek(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; return child->ops->peek(child); } /* No sfb_drop -- impossible since the child doesn't return the dropped skb. 
*/ static void sfb_reset(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); if (likely(q->qdisc)) qdisc_reset(q->qdisc); q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q); sfb_init_perturbation(0, q); } static void sfb_destroy(struct Qdisc *sch) { struct sfb_sched_data *q = qdisc_priv(sch); tcf_block_put(q->block); qdisc_put(q->qdisc); } static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = { [TCA_SFB_PARMS] = { .len = sizeof(struct tc_sfb_qopt) }, }; static const struct tc_sfb_qopt sfb_default_ops = { .rehash_interval = 600 * MSEC_PER_SEC, .warmup_time = 60 * MSEC_PER_SEC, .limit = 0, .max = 25, .bin_size = 20, .increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */ .decrement = (SFB_MAX_PROB + 3000) / 6000, .penalty_rate = 10, .penalty_burst = 20, }; static int sfb_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct sfb_sched_data *q = qdisc_priv(sch); struct Qdisc *child, *old; struct nlattr *tb[TCA_SFB_MAX + 1]; const struct tc_sfb_qopt *ctl = &sfb_default_ops; u32 limit; int err; if (opt) { err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt, sfb_policy, NULL); if (err < 0) return -EINVAL; if (tb[TCA_SFB_PARMS] == NULL) return -EINVAL; ctl = nla_data(tb[TCA_SFB_PARMS]); } limit = ctl->limit; if (limit == 0) limit = qdisc_dev(sch)->tx_queue_len; child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack); if (IS_ERR(child)) return PTR_ERR(child); if (child != &noop_qdisc) qdisc_hash_add(child, true); sch_tree_lock(sch); qdisc_purge_queue(q->qdisc); old = q->qdisc; q->qdisc = child; q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval); q->warmup_time = msecs_to_jiffies(ctl->warmup_time); q->rehash_time = jiffies; q->limit = limit; q->increment = ctl->increment; q->decrement = ctl->decrement; q->max = ctl->max; q->bin_size = ctl->bin_size; q->penalty_rate = ctl->penalty_rate; q->penalty_burst = ctl->penalty_burst; q->tokens_avail = ctl->penalty_burst; q->token_time = jiffies; q->slot = 0; q->double_buffering = false; sfb_zero_all_buckets(q); sfb_init_perturbation(0, q); sfb_init_perturbation(1, q); sch_tree_unlock(sch); qdisc_put(old); return 0; } static int sfb_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { struct sfb_sched_data *q = qdisc_priv(sch); int err; err = tcf_block_get(&q->block, &q->filter_list, sch, extack); if (err) return err; q->qdisc = &noop_qdisc; return sfb_change(sch, opt, extack); } static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb) { struct sfb_sched_data *q = qdisc_priv(sch); struct nlattr *opts; struct tc_sfb_qopt opt = { .rehash_interval = jiffies_to_msecs(q->rehash_interval), .warmup_time = jiffies_to_msecs(q->warmup_time), .limit = q->limit, .max = q->max, .bin_size = q->bin_size, .increment = q->increment, .decrement = q->decrement, .penalty_rate = q->penalty_rate, .penalty_burst = q->penalty_burst, }; sch->qstats.backlog = q->qdisc->qstats.backlog; opts = nla_nest_start_noflag(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt)) goto nla_put_failure; return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct sfb_sched_data *q = qdisc_priv(sch); struct tc_sfb_xstats st = { .earlydrop = q->stats.earlydrop, .penaltydrop = q->stats.penaltydrop, .bucketdrop = q->stats.bucketdrop, .queuedrop = q->stats.queuedrop, .childdrop = q->stats.childdrop, .marked = q->stats.marked, }; 
st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q); return gnet_stats_copy_app(d, &st, sizeof(st)); } static int sfb_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { return -ENOSYS; } static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old, struct netlink_ext_ack *extack) { struct sfb_sched_data *q = qdisc_priv(sch); if (new == NULL) new = &noop_qdisc; *old = qdisc_replace(sch, new, &q->qdisc); return 0; } static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg) { struct sfb_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long sfb_find(struct Qdisc *sch, u32 classid) { return 1; } static void sfb_unbind(struct Qdisc *sch, unsigned long arg) { } static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg, struct netlink_ext_ack *extack) { return -ENOSYS; } static int sfb_delete(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { return -ENOSYS; } static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { tc_qdisc_stats_dump(sch, 1, walker); } } static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl, struct netlink_ext_ack *extack) { struct sfb_sched_data *q = qdisc_priv(sch); if (cl) return NULL; return q->block; } static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { return 0; } static const struct Qdisc_class_ops sfb_class_ops = { .graft = sfb_graft, .leaf = sfb_leaf, .find = sfb_find, .change = sfb_change_class, .delete = sfb_delete, .walk = sfb_walk, .tcf_block = sfb_tcf_block, .bind_tcf = sfb_bind, .unbind_tcf = sfb_unbind, .dump = sfb_dump_class, }; static struct Qdisc_ops sfb_qdisc_ops __read_mostly = { .id = "sfb", .priv_size = sizeof(struct sfb_sched_data), .cl_ops = &sfb_class_ops, .enqueue = sfb_enqueue, .dequeue = sfb_dequeue, .peek = sfb_peek, .init = sfb_init, .reset = sfb_reset, .destroy = sfb_destroy, .change = sfb_change, .dump = sfb_dump, .dump_stats = sfb_dump_stats, .owner = THIS_MODULE, }; MODULE_ALIAS_NET_SCH("sfb"); static int __init sfb_module_init(void) { return register_qdisc(&sfb_qdisc_ops); } static void __exit sfb_module_exit(void) { unregister_qdisc(&sfb_qdisc_ops); } module_init(sfb_module_init) module_exit(sfb_module_exit) MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline"); MODULE_AUTHOR("Juliusz Chroboczek"); MODULE_AUTHOR("Eric Dumazet"); MODULE_LICENSE("GPL");
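For reference, a minimal standalone sketch (userspace, not part of the qdisc) of the per-level bucket indexing that sfb_enqueue() above performs: the 32-bit flow hash is consumed SFB_BUCKET_SHIFT bits at a time, one bucket index per Blue level. The constants are restated locally under the assumption of the in-tree defaults (4-bit bucket index, 16 buckets, 32/4 levels); adjust if the real definitions differ.

/* Illustration only: walk one flow hash through the SFB levels. */
#include <stdint.h>
#include <stdio.h>

#define SFB_BUCKET_SHIFT 4                      /* assumed default */
#define SFB_NUMBUCKETS   (1 << SFB_BUCKET_SHIFT)
#define SFB_BUCKET_MASK  (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS       (32 / SFB_BUCKET_SHIFT)

int main(void)
{
	uint32_t sfbhash = 0xdeadbeef;          /* example per-flow hash */

	for (int i = 0; i < SFB_LEVELS; i++) {
		uint32_t bucket = sfbhash & SFB_BUCKET_MASK;

		printf("level %d -> bucket %u\n", i, bucket);
		sfbhash >>= SFB_BUCKET_SHIFT;
	}
	return 0;
}

Each level therefore indexes an independent bucket, which is why a single inelastic flow has to saturate the marking probability in every level before it is penalised.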
/* SPDX-License-Identifier: GPL-2.0-only */ /* * File: af_phonet.h * * Phonet sockets kernel definitions * * Copyright (C) 2008 Nokia Corporation. */ #ifndef AF_PHONET_H #define AF_PHONET_H #include <linux/phonet.h> #include <linux/skbuff.h> #include <net/sock.h> /* * The lower layers may not require more space, ever. Make sure it's * enough. */ #define MAX_PHONET_HEADER (8 + MAX_HEADER) /* * Every Phonet* socket has this structure first in its * protocol-specific structure under name c. */ struct pn_sock { struct sock sk; u16 sobject; u16 dobject; u8 resource; }; static inline struct pn_sock *pn_sk(struct sock *sk) { return (struct pn_sock *)sk; } extern const struct proto_ops phonet_dgram_ops; void pn_sock_init(void); struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *sa); void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb); void phonet_get_local_port_range(int *min, int *max); int pn_sock_hash(struct sock *sk); void pn_sock_unhash(struct sock *sk); int pn_sock_get_port(struct sock *sk, unsigned short sport); struct sock *pn_find_sock_by_res(struct net *net, u8 res); int pn_sock_bind_res(struct sock *sock, u8 res); int pn_sock_unbind_res(struct sock *sk, u8 res); void pn_sock_unbind_all_res(struct sock *sk); int pn_skb_send(struct sock *sk, struct sk_buff *skb, const struct sockaddr_pn *target); static inline struct phonethdr *pn_hdr(struct sk_buff *skb) { return (struct phonethdr *)skb_network_header(skb); } static inline struct phonetmsg *pn_msg(struct sk_buff *skb) { return (struct phonetmsg *)skb_transport_header(skb); } /* * Get the other party's sockaddr from received skb. The skb begins * with a Phonet header. */ static inline void pn_skb_get_src_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa) { struct phonethdr *ph = pn_hdr(skb); u16 obj = pn_object(ph->pn_sdev, ph->pn_sobj); sa->spn_family = AF_PHONET; pn_sockaddr_set_object(sa, obj); pn_sockaddr_set_resource(sa, ph->pn_res); memset(sa->spn_zero, 0, sizeof(sa->spn_zero)); } static inline void pn_skb_get_dst_sockaddr(struct sk_buff *skb, struct sockaddr_pn *sa) { struct phonethdr *ph = pn_hdr(skb); u16 obj = pn_object(ph->pn_rdev, ph->pn_robj); sa->spn_family = AF_PHONET; pn_sockaddr_set_object(sa, obj); pn_sockaddr_set_resource(sa, ph->pn_res); memset(sa->spn_zero, 0, sizeof(sa->spn_zero)); } /* Protocols in Phonet protocol family.
*/ struct phonet_protocol { const struct proto_ops *ops; struct proto *prot; int sock_type; }; int phonet_proto_register(unsigned int protocol, const struct phonet_protocol *pp); void phonet_proto_unregister(unsigned int protocol, const struct phonet_protocol *pp); int phonet_sysctl_init(void); void phonet_sysctl_exit(void); int isi_register(void); void isi_unregister(void); static inline bool sk_is_phonet(struct sock *sk) { return sk->sk_family == PF_PHONET; } static inline int phonet_sk_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) { int karg; switch (cmd) { case SIOCPNADDRESOURCE: case SIOCPNDELRESOURCE: if (get_user(karg, (int __user *)arg)) return -EFAULT; return sk->sk_prot->ioctl(sk, cmd, &karg); } /* A positive return value means that the ioctl was not processed */ return 1; } #endif
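A minimal sketch of how a PF_PHONET sub-protocol could plug into the registration interface declared above. The objects my_pn_proto, my_pn_ops and MY_PN_PROTO are hypothetical placeholders, not existing kernel symbols; a real user would fill in the proto and proto_ops and pick a genuine Phonet protocol number.

/* Sketch only, assuming <linux/init.h> and <linux/net.h> are available. */
static struct proto my_pn_proto;                /* would be fully defined for real */
static const struct proto_ops my_pn_ops;        /* likewise */
#define MY_PN_PROTO 0                           /* hypothetical protocol number */

static const struct phonet_protocol my_pn_protocol = {
	.ops		= &my_pn_ops,
	.prot		= &my_pn_proto,
	.sock_type	= SOCK_DGRAM,
};

static int __init my_pn_init(void)
{
	return phonet_proto_register(MY_PN_PROTO, &my_pn_protocol);
}

static void __exit my_pn_exit(void)
{
	phonet_proto_unregister(MY_PN_PROTO, &my_pn_protocol);
}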
// SPDX-License-Identifier: GPL-2.0-only /* * irq_comm.c: Common API for in kernel interrupt controller * Copyright (c) 2007, Intel Corporation. * * Authors: * Yaozu (Eddie) Dong <Eddie.dong@intel.com> * * Copyright 2010 Red Hat, Inc. and/or its affiliates.
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kvm_host.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/rculist.h> #include <trace/events/kvm.h> #include "irq.h" #include "ioapic.h" #include "lapic.h" #include "hyperv.h" #include "x86.h" #include "xen.h" static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct kvm_pic *pic = kvm->arch.vpic; return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); } static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, line_status); } int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, struct dest_map *dest_map) { int r = -1; struct kvm_vcpu *vcpu, *lowest = NULL; unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]; unsigned int dest_vcpus = 0; if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map)) return r; if (irq->dest_mode == APIC_DEST_PHYSICAL && irq->dest_id == 0xff && kvm_lowest_prio_delivery(irq)) { pr_info("apic: phys broadcast and lowest prio\n"); irq->delivery_mode = APIC_DM_FIXED; } memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap)); kvm_for_each_vcpu(i, vcpu, kvm) { if (!kvm_apic_present(vcpu)) continue; if (!kvm_apic_match_dest(vcpu, src, irq->shorthand, irq->dest_id, irq->dest_mode)) continue; if (!kvm_lowest_prio_delivery(irq)) { if (r < 0) r = 0; r += kvm_apic_set_irq(vcpu, irq, dest_map); } else if (kvm_apic_sw_enabled(vcpu->arch.apic)) { if (!kvm_vector_hashing_enabled()) { if (!lowest) lowest = vcpu; else if (kvm_apic_compare_prio(vcpu, lowest) < 0) lowest = vcpu; } else { __set_bit(i, dest_vcpu_bitmap); dest_vcpus++; } } } if (dest_vcpus != 0) { int idx = kvm_vector_to_index(irq->vector, dest_vcpus, dest_vcpu_bitmap, KVM_MAX_VCPUS); lowest = kvm_get_vcpu(kvm, idx); } if (lowest) r = kvm_apic_set_irq(lowest, irq, dest_map); return r; } void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, struct kvm_lapic_irq *irq) { struct msi_msg msg = { .address_lo = e->msi.address_lo, .address_hi = e->msi.address_hi, .data = e->msi.data }; trace_kvm_msi_set_irq(msg.address_lo | (kvm->arch.x2apic_format ? 
(u64)msg.address_hi << 32 : 0), msg.data); irq->dest_id = x86_msi_msg_get_destid(&msg, kvm->arch.x2apic_format); irq->vector = msg.arch_data.vector; irq->dest_mode = kvm_lapic_irq_dest_mode(msg.arch_addr_lo.dest_mode_logical); irq->trig_mode = msg.arch_data.is_level; irq->delivery_mode = msg.arch_data.delivery_mode << 8; irq->msi_redir_hint = msg.arch_addr_lo.redirect_hint; irq->level = 1; irq->shorthand = APIC_DEST_NOSHORT; } EXPORT_SYMBOL_GPL(kvm_set_msi_irq); static inline bool kvm_msi_route_invalid(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e) { return kvm->arch.x2apic_format && (e->msi.address_hi & 0xff); } int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct kvm_lapic_irq irq; if (kvm_msi_route_invalid(kvm, e)) return -EINVAL; if (!level) return -1; kvm_set_msi_irq(kvm, e, &irq); return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL); } #ifdef CONFIG_KVM_HYPERV static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { if (!level) return -1; return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); } #endif int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id, int level, bool line_status) { struct kvm_lapic_irq irq; int r; switch (e->type) { #ifdef CONFIG_KVM_HYPERV case KVM_IRQ_ROUTING_HV_SINT: return kvm_hv_set_sint(e, kvm, irq_source_id, level, line_status); #endif case KVM_IRQ_ROUTING_MSI: if (kvm_msi_route_invalid(kvm, e)) return -EINVAL; kvm_set_msi_irq(kvm, e, &irq); if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) return r; break; #ifdef CONFIG_KVM_XEN case KVM_IRQ_ROUTING_XEN_EVTCHN: if (!level) return -1; return kvm_xen_set_evtchn_fast(&e->xen_evtchn, kvm); #endif default: break; } return -EWOULDBLOCK; } int kvm_request_irq_source_id(struct kvm *kvm) { unsigned long *bitmap = &kvm->arch.irq_sources_bitmap; int irq_source_id; mutex_lock(&kvm->irq_lock); irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG); if (irq_source_id >= BITS_PER_LONG) { pr_warn("exhausted allocatable IRQ sources!\n"); irq_source_id = -EFAULT; goto unlock; } ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID); set_bit(irq_source_id, bitmap); unlock: mutex_unlock(&kvm->irq_lock); return irq_source_id; } void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id) { ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID); ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID); mutex_lock(&kvm->irq_lock); if (irq_source_id < 0 || irq_source_id >= BITS_PER_LONG) { pr_err("IRQ source ID out of range!\n"); goto unlock; } clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap); if (!irqchip_kernel(kvm)) goto unlock; kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id); kvm_pic_clear_all(kvm->arch.vpic, irq_source_id); unlock: mutex_unlock(&kvm->irq_lock); } void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq, struct kvm_irq_mask_notifier *kimn) { mutex_lock(&kvm->irq_lock); kimn->irq = irq; hlist_add_head_rcu(&kimn->link, &kvm->arch.mask_notifier_list); mutex_unlock(&kvm->irq_lock); } void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq, struct kvm_irq_mask_notifier *kimn) { mutex_lock(&kvm->irq_lock); hlist_del_rcu(&kimn->link); mutex_unlock(&kvm->irq_lock); synchronize_srcu(&kvm->irq_srcu); } void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, bool mask) { 
struct kvm_irq_mask_notifier *kimn; int idx, gsi; idx = srcu_read_lock(&kvm->irq_srcu); gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin); if (gsi != -1) hlist_for_each_entry_rcu(kimn, &kvm->arch.mask_notifier_list, link) if (kimn->irq == gsi) kimn->func(kimn, mask); srcu_read_unlock(&kvm->irq_srcu, idx); } bool kvm_arch_can_set_irq_routing(struct kvm *kvm) { return irqchip_in_kernel(kvm); } int kvm_set_routing_entry(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e, const struct kvm_irq_routing_entry *ue) { /* We can't check irqchip_in_kernel() here as some callers are * currently initializing the irqchip. Other callers should therefore * check kvm_arch_can_set_irq_routing() before calling this function. */ switch (ue->type) { case KVM_IRQ_ROUTING_IRQCHIP: if (irqchip_split(kvm)) return -EINVAL; e->irqchip.pin = ue->u.irqchip.pin; switch (ue->u.irqchip.irqchip) { case KVM_IRQCHIP_PIC_SLAVE: e->irqchip.pin += PIC_NUM_PINS / 2; fallthrough; case KVM_IRQCHIP_PIC_MASTER: if (ue->u.irqchip.pin >= PIC_NUM_PINS / 2) return -EINVAL; e->set = kvm_set_pic_irq; break; case KVM_IRQCHIP_IOAPIC: if (ue->u.irqchip.pin >= KVM_IOAPIC_NUM_PINS) return -EINVAL; e->set = kvm_set_ioapic_irq; break; default: return -EINVAL; } e->irqchip.irqchip = ue->u.irqchip.irqchip; break; case KVM_IRQ_ROUTING_MSI: e->set = kvm_set_msi; e->msi.address_lo = ue->u.msi.address_lo; e->msi.address_hi = ue->u.msi.address_hi; e->msi.data = ue->u.msi.data; if (kvm_msi_route_invalid(kvm, e)) return -EINVAL; break; #ifdef CONFIG_KVM_HYPERV case KVM_IRQ_ROUTING_HV_SINT: e->set = kvm_hv_set_sint; e->hv_sint.vcpu = ue->u.hv_sint.vcpu; e->hv_sint.sint = ue->u.hv_sint.sint; break; #endif #ifdef CONFIG_KVM_XEN case KVM_IRQ_ROUTING_XEN_EVTCHN: return kvm_xen_setup_evtchn(kvm, e, ue); #endif default: return -EINVAL; } return 0; } bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq, struct kvm_vcpu **dest_vcpu) { int r = 0; unsigned long i; struct kvm_vcpu *vcpu; if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu)) return true; kvm_for_each_vcpu(i, vcpu, kvm) { if (!kvm_apic_present(vcpu)) continue; if (!kvm_apic_match_dest(vcpu, NULL, irq->shorthand, irq->dest_id, irq->dest_mode)) continue; if (++r == 2) return false; *dest_vcpu = vcpu; } return r == 1; } EXPORT_SYMBOL_GPL(kvm_intr_is_single_vcpu); #define IOAPIC_ROUTING_ENTRY(irq) \ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } } #define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq) #define PIC_ROUTING_ENTRY(irq) \ { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \ .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } } #define ROUTING_ENTRY2(irq) \ IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq) static const struct kvm_irq_routing_entry default_routing[] = { ROUTING_ENTRY2(0), ROUTING_ENTRY2(1), ROUTING_ENTRY2(2), ROUTING_ENTRY2(3), ROUTING_ENTRY2(4), ROUTING_ENTRY2(5), ROUTING_ENTRY2(6), ROUTING_ENTRY2(7), ROUTING_ENTRY2(8), ROUTING_ENTRY2(9), ROUTING_ENTRY2(10), ROUTING_ENTRY2(11), ROUTING_ENTRY2(12), ROUTING_ENTRY2(13), ROUTING_ENTRY2(14), ROUTING_ENTRY2(15), ROUTING_ENTRY1(16), ROUTING_ENTRY1(17), ROUTING_ENTRY1(18), ROUTING_ENTRY1(19), ROUTING_ENTRY1(20), ROUTING_ENTRY1(21), ROUTING_ENTRY1(22), ROUTING_ENTRY1(23), }; int kvm_setup_default_irq_routing(struct kvm *kvm) { return kvm_set_irq_routing(kvm, default_routing, ARRAY_SIZE(default_routing), 0); } void kvm_arch_post_irq_routing_update(struct kvm *kvm) { if (!irqchip_split(kvm)) return; kvm_make_scan_ioapic_request(kvm); } void 
kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors) { struct kvm *kvm = vcpu->kvm; struct kvm_kernel_irq_routing_entry *entry; struct kvm_irq_routing_table *table; u32 i, nr_ioapic_pins; int idx; idx = srcu_read_lock(&kvm->irq_srcu); table = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu); nr_ioapic_pins = min_t(u32, table->nr_rt_entries, kvm->arch.nr_reserved_ioapic_pins); for (i = 0; i < nr_ioapic_pins; ++i) { hlist_for_each_entry(entry, &table->map[i], link) { struct kvm_lapic_irq irq; if (entry->type != KVM_IRQ_ROUTING_MSI) continue; kvm_set_msi_irq(vcpu->kvm, entry, &irq); if (irq.trig_mode && (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT, irq.dest_id, irq.dest_mode) || kvm_apic_pending_eoi(vcpu, irq.vector))) __set_bit(irq.vector, ioapic_handled_vectors); } } srcu_read_unlock(&kvm->irq_srcu, idx); } void kvm_arch_irq_routing_update(struct kvm *kvm) { #ifdef CONFIG_KVM_HYPERV kvm_hv_irq_routing_update(kvm); #endif }
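To make the default_routing table above concrete, the following is roughly what a single ROUTING_ENTRY2() use expands to: two kvm_irq_routing_entry initialisers for the same GSI, one targeting the IOAPIC and one targeting the PIC. The array name is illustrative only, and SELECT_PIC() (defined elsewhere in KVM) is assumed to resolve to the master PIC for GSIs below 8.

/* Illustration only: approximate expansion of ROUTING_ENTRY2(3). */
static const struct kvm_irq_routing_entry example_gsi3[] = {
	{ .gsi = 3, .type = KVM_IRQ_ROUTING_IRQCHIP,
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC,     .pin = 3 } },
	{ .gsi = 3, .type = KVM_IRQ_ROUTING_IRQCHIP,
	  .u.irqchip = { .irqchip = KVM_IRQCHIP_PIC_MASTER, .pin = 3 } },
};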
// SPDX-License-Identifier: GPL-2.0-only /* * v4l2-fh.c * * V4L2 file handles. * * Copyright (C) 2009--2010 Nokia Corporation. * * Contact: Sakari Ailus <sakari.ailus@iki.fi> */ #include <linux/bitops.h> #include <linux/slab.h> #include <linux/export.h> #include <media/v4l2-dev.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mc.h> void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev) { fh->vdev = vdev; /* Inherit from video_device. May be overridden by the driver. */ fh->ctrl_handler = vdev->ctrl_handler; INIT_LIST_HEAD(&fh->list); set_bit(V4L2_FL_USES_V4L2_FH, &fh->vdev->flags); /* * determine_valid_ioctls() does not know if struct v4l2_fh * is used by this driver, but here we do. So enable the * prio ioctls here. */ set_bit(_IOC_NR(VIDIOC_G_PRIORITY), vdev->valid_ioctls); set_bit(_IOC_NR(VIDIOC_S_PRIORITY), vdev->valid_ioctls); fh->prio = V4L2_PRIORITY_UNSET; init_waitqueue_head(&fh->wait); INIT_LIST_HEAD(&fh->available); INIT_LIST_HEAD(&fh->subscribed); fh->sequence = -1; mutex_init(&fh->subscribe_lock); } EXPORT_SYMBOL_GPL(v4l2_fh_init); void v4l2_fh_add(struct v4l2_fh *fh) { unsigned long flags; v4l2_prio_open(fh->vdev->prio, &fh->prio); spin_lock_irqsave(&fh->vdev->fh_lock, flags); list_add(&fh->list, &fh->vdev->fh_list); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); } EXPORT_SYMBOL_GPL(v4l2_fh_add); int v4l2_fh_open(struct file *filp) { struct video_device *vdev = video_devdata(filp); struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL); filp->private_data = fh; if (fh == NULL) return -ENOMEM; v4l2_fh_init(fh, vdev); v4l2_fh_add(fh); return 0; } EXPORT_SYMBOL_GPL(v4l2_fh_open); void v4l2_fh_del(struct v4l2_fh *fh) { unsigned long flags; spin_lock_irqsave(&fh->vdev->fh_lock, flags); list_del_init(&fh->list); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); v4l2_prio_close(fh->vdev->prio, fh->prio); } EXPORT_SYMBOL_GPL(v4l2_fh_del); void v4l2_fh_exit(struct v4l2_fh *fh) { if (fh->vdev == NULL) return; v4l_disable_media_source(fh->vdev); v4l2_event_unsubscribe_all(fh); mutex_destroy(&fh->subscribe_lock); fh->vdev = NULL; } EXPORT_SYMBOL_GPL(v4l2_fh_exit); int v4l2_fh_release(struct file *filp) { struct v4l2_fh *fh = filp->private_data; if (fh) { v4l2_fh_del(fh); v4l2_fh_exit(fh); kfree(fh); filp->private_data = NULL; } return 0; } EXPORT_SYMBOL_GPL(v4l2_fh_release); int v4l2_fh_is_singular(struct v4l2_fh *fh) { unsigned long flags; int is_singular; if (fh == NULL || fh->vdev == NULL) return 0; spin_lock_irqsave(&fh->vdev->fh_lock, flags); is_singular = list_is_singular(&fh->list); spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); return is_singular; } EXPORT_SYMBOL_GPL(v4l2_fh_is_singular);
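As a usage sketch: a driver that keeps no per-file state beyond struct v4l2_fh can typically point its file operations straight at the helpers above, with v4l2_fh_open() allocating and registering the handle and v4l2_fh_release() tearing it down. The structure name below is a placeholder, not an in-tree symbol.

/* Sketch only: minimal fops built purely from the v4l2_fh helpers. */
static const struct v4l2_file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= v4l2_fh_open,		/* kzalloc + v4l2_fh_init + v4l2_fh_add */
	.release	= v4l2_fh_release,	/* v4l2_fh_del + v4l2_fh_exit + kfree */
	.unlocked_ioctl	= video_ioctl2,
	.poll		= v4l2_ctrl_poll,
};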
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* AF_RXRPC internal definitions * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) */ #include <linux/atomic.h> #include <linux/seqlock.h> #include <linux/win_minmax.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/sock.h> #include <net/af_rxrpc.h> #include <keys/rxrpc-type.h> #include "protocol.h" #define FCRYPT_BSIZE 8 struct rxrpc_crypt { union { u8 x[FCRYPT_BSIZE]; __be32 n[2]; }; } __attribute__((aligned(8))); #define rxrpc_queue_work(WS) queue_work(rxrpc_workqueue, (WS)) #define rxrpc_queue_delayed_work(WS,D) \ queue_delayed_work(rxrpc_workqueue, (WS), (D)) struct key_preparsed_payload; struct rxrpc_connection; struct rxrpc_txbuf; /* * Mark applied to socket buffers in skb->mark. skb->priority is used * to pass supplementary information.
*/ enum rxrpc_skb_mark { RXRPC_SKB_MARK_PACKET, /* Received packet */ RXRPC_SKB_MARK_ERROR, /* Error notification */ RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */ RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */ RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */ }; /* * sk_state for RxRPC sockets */ enum { RXRPC_UNBOUND = 0, RXRPC_CLIENT_UNBOUND, /* Unbound socket used as client */ RXRPC_CLIENT_BOUND, /* client local address bound */ RXRPC_SERVER_BOUND, /* server local address bound */ RXRPC_SERVER_BOUND2, /* second server local address bound */ RXRPC_SERVER_LISTENING, /* server listening for connections */ RXRPC_SERVER_LISTEN_DISABLED, /* server listening disabled */ RXRPC_CLOSE, /* socket is being closed */ }; /* * Per-network namespace data. */ struct rxrpc_net { struct proc_dir_entry *proc_net; /* Subdir in /proc/net */ u32 epoch; /* Local epoch for detecting local-end reset */ struct list_head calls; /* List of calls active in this namespace */ spinlock_t call_lock; /* Lock for ->calls */ atomic_t nr_calls; /* Count of allocated calls */ atomic_t nr_conns; struct list_head bundle_proc_list; /* List of bundles for proc */ struct list_head conn_proc_list; /* List of conns in this namespace for proc */ struct list_head service_conns; /* Service conns in this namespace */ rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */ struct work_struct service_conn_reaper; struct timer_list service_conn_reap_timer; bool live; atomic_t nr_client_conns; struct hlist_head local_endpoints; struct mutex local_mutex; /* Lock for ->local_endpoints */ DECLARE_HASHTABLE (peer_hash, 10); spinlock_t peer_hash_lock; /* Lock for ->peer_hash */ #define RXRPC_KEEPALIVE_TIME 20 /* NAT keepalive time in seconds */ u8 peer_keepalive_cursor; time64_t peer_keepalive_base; struct list_head peer_keepalive[32]; struct list_head peer_keepalive_new; struct timer_list peer_keepalive_timer; struct work_struct peer_keepalive_work; atomic_t stat_tx_data; atomic_t stat_tx_data_retrans; atomic_t stat_tx_data_send; atomic_t stat_tx_data_send_frag; atomic_t stat_tx_data_send_fail; atomic_t stat_tx_data_underflow; atomic_t stat_tx_data_cwnd_reset; atomic_t stat_rx_data; atomic_t stat_rx_data_reqack; atomic_t stat_rx_data_jumbo; atomic_t stat_tx_ack_fill; atomic_t stat_tx_ack_send; atomic_t stat_tx_ack_skip; atomic_t stat_tx_acks[256]; atomic_t stat_rx_acks[256]; atomic_t stat_why_req_ack[8]; atomic_t stat_io_loop; }; /* * Service backlog preallocation. * * This contains circular buffers of preallocated peers, connections and calls * for incoming service calls and their head and tail pointers. This allows * calls to be set up in the data_ready handler, thereby avoiding the need to * shuffle packets around so much. 
*/ struct rxrpc_backlog { unsigned short peer_backlog_head; unsigned short peer_backlog_tail; unsigned short conn_backlog_head; unsigned short conn_backlog_tail; unsigned short call_backlog_head; unsigned short call_backlog_tail; #define RXRPC_BACKLOG_MAX 32 struct rxrpc_peer *peer_backlog[RXRPC_BACKLOG_MAX]; struct rxrpc_connection *conn_backlog[RXRPC_BACKLOG_MAX]; struct rxrpc_call *call_backlog[RXRPC_BACKLOG_MAX]; }; /* * RxRPC socket definition */ struct rxrpc_sock { /* WARNING: sk has to be the first member */ struct sock sk; rxrpc_notify_new_call_t notify_new_call; /* Func to notify of new call */ rxrpc_discard_new_call_t discard_new_call; /* Func to discard a new call */ struct rxrpc_local *local; /* local endpoint */ struct rxrpc_backlog *backlog; /* Preallocation for services */ spinlock_t incoming_lock; /* Incoming call vs service shutdown lock */ struct list_head sock_calls; /* List of calls owned by this socket */ struct list_head to_be_accepted; /* calls awaiting acceptance */ struct list_head recvmsg_q; /* Calls awaiting recvmsg's attention */ spinlock_t recvmsg_lock; /* Lock for recvmsg_q */ struct key *key; /* security for this socket */ struct key *securities; /* list of server security descriptors */ struct rb_root calls; /* User ID -> call mapping */ unsigned long flags; #define RXRPC_SOCK_CONNECTED 0 /* connect_srx is set */ rwlock_t call_lock; /* lock for calls */ u32 min_sec_level; /* minimum security level */ #define RXRPC_SECURITY_MAX RXRPC_SECURITY_ENCRYPT bool exclusive; /* Exclusive connection for a client socket */ u16 second_service; /* Additional service bound to the endpoint */ struct { /* Service upgrade information */ u16 from; /* Service ID to upgrade (if not 0) */ u16 to; /* service ID to upgrade to */ } service_upgrade; sa_family_t family; /* Protocol family created with */ struct sockaddr_rxrpc srx; /* Primary Service/local addresses */ struct sockaddr_rxrpc connect_srx; /* Default client address from connect() */ }; #define rxrpc_sk(__sk) container_of((__sk), struct rxrpc_sock, sk) /* * CPU-byteorder normalised Rx packet header. 
*/ struct rxrpc_host_header { u32 epoch; /* client boot timestamp */ u32 cid; /* connection and channel ID */ u32 callNumber; /* call ID (0 for connection-level packets) */ u32 seq; /* sequence number of pkt in call stream */ u32 serial; /* serial number of pkt sent to network */ u8 type; /* packet type */ u8 flags; /* packet flags */ u8 userStatus; /* app-layer defined status */ u8 securityIndex; /* security protocol ID */ union { u16 _rsvd; /* reserved */ u16 cksum; /* kerberos security checksum */ }; u16 serviceId; /* service ID */ } __packed; /* * RxRPC socket buffer private variables * - max 48 bytes (struct sk_buff::cb) */ struct rxrpc_skb_priv { union { struct rxrpc_connection *conn; /* Connection referred to (poke packet) */ struct { u16 offset; /* Offset of data */ u16 len; /* Length of data */ u8 flags; #define RXRPC_RX_VERIFIED 0x01 }; struct { rxrpc_seq_t first_ack; /* First packet in acks table */ rxrpc_seq_t prev_ack; /* Highest seq seen */ rxrpc_serial_t acked_serial; /* Packet in response to (or 0) */ u8 reason; /* Reason for ack */ u8 nr_acks; /* Number of acks+nacks */ u8 nr_nacks; /* Number of nacks */ } ack; }; struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */ }; #define rxrpc_skb(__skb) ((struct rxrpc_skb_priv *) &(__skb)->cb) /* * RxRPC security module interface */ struct rxrpc_security { const char *name; /* name of this service */ u8 security_index; /* security type provided */ u32 no_key_abort; /* Abort code indicating no key */ /* Initialise a security service */ int (*init)(void); /* Clean up a security service */ void (*exit)(void); /* Parse the information from a server key */ int (*preparse_server_key)(struct key_preparsed_payload *); /* Clean up the preparse buffer after parsing a server key */ void (*free_preparse_server_key)(struct key_preparsed_payload *); /* Destroy the payload of a server key */ void (*destroy_server_key)(struct key *); /* Describe a server key */ void (*describe_server_key)(const struct key *, struct seq_file *); /* initialise a connection's security */ int (*init_connection_security)(struct rxrpc_connection *, struct rxrpc_key_token *); /* Work out how much data we can store in a packet, given an estimate * of the amount of data remaining and allocate a data buffer. 
*/ struct rxrpc_txbuf *(*alloc_txbuf)(struct rxrpc_call *call, size_t remaining, gfp_t gfp); /* impose security on a packet */ int (*secure_packet)(struct rxrpc_call *, struct rxrpc_txbuf *); /* verify the security on a received packet */ int (*verify_packet)(struct rxrpc_call *, struct sk_buff *); /* Free crypto request on a call */ void (*free_call_crypto)(struct rxrpc_call *); /* issue a challenge */ int (*issue_challenge)(struct rxrpc_connection *); /* respond to a challenge */ int (*respond_to_challenge)(struct rxrpc_connection *, struct sk_buff *); /* verify a response */ int (*verify_response)(struct rxrpc_connection *, struct sk_buff *); /* clear connection security */ void (*clear)(struct rxrpc_connection *); }; /* * RxRPC local transport endpoint description * - owned by a single AF_RXRPC socket * - pointed to by transport socket struct sk_user_data */ struct rxrpc_local { struct rcu_head rcu; atomic_t active_users; /* Number of users of the local endpoint */ refcount_t ref; /* Number of references to the structure */ struct net *net; /* The network namespace */ struct rxrpc_net *rxnet; /* Our bits in the network namespace */ struct hlist_node link; struct socket *socket; /* my UDP socket */ struct task_struct *io_thread; struct completion io_thread_ready; /* Indication that the I/O thread started */ struct page_frag_cache tx_alloc; /* Tx control packet allocation (I/O thread only) */ struct rxrpc_sock *service; /* Service(s) listening on this endpoint */ #ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY struct sk_buff_head rx_delay_queue; /* Delay injection queue */ #endif struct sk_buff_head rx_queue; /* Received packets */ struct list_head conn_attend_q; /* Conns requiring immediate attention */ struct list_head call_attend_q; /* Calls requiring immediate attention */ struct rb_root client_bundles; /* Client connection bundles by socket params */ spinlock_t client_bundles_lock; /* Lock for client_bundles */ bool kill_all_client_conns; struct list_head idle_client_conns; struct timer_list client_conn_reap_timer; unsigned long client_conn_flags; #define RXRPC_CLIENT_CONN_REAP_TIMER 0 /* The client conn reap timer expired */ spinlock_t lock; /* access lock */ rwlock_t services_lock; /* lock for services list */ int debug_id; /* debug ID for printks */ bool dead; bool service_closed; /* Service socket closed */ struct idr conn_ids; /* List of connection IDs */ struct list_head new_client_calls; /* Newly created client calls need connection */ spinlock_t client_call_lock; /* Lock for ->new_client_calls */ struct sockaddr_rxrpc srx; /* local address */ }; /* * RxRPC remote transport endpoint definition * - matched by local endpoint, remote port, address and protocol type */ struct rxrpc_peer { struct rcu_head rcu; /* This must be first */ refcount_t ref; unsigned long hash_key; struct hlist_node hash_link; struct rxrpc_local *local; struct hlist_head error_targets; /* targets for net error distribution */ struct rb_root service_conns; /* Service connections */ struct list_head keepalive_link; /* Link in net->peer_keepalive[] */ time64_t last_tx_at; /* Last time packet sent here */ seqlock_t service_conn_lock; spinlock_t lock; /* access lock */ unsigned int if_mtu; /* interface MTU for this peer */ unsigned int mtu; /* network MTU for this peer */ unsigned int maxdata; /* data size (MTU - hdrsize) */ unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ int debug_id; /* debug ID for printks */ struct sockaddr_rxrpc srx; /* remote address */ /* calculated RTT cache */ #define 
RXRPC_RTT_CACHE_SIZE 32 spinlock_t rtt_input_lock; /* RTT lock for input routine */ ktime_t rtt_last_req; /* Time of last RTT request */ unsigned int rtt_count; /* Number of samples we've got */ u32 srtt_us; /* smoothed round trip time << 3 in usecs */ u32 mdev_us; /* medium deviation */ u32 mdev_max_us; /* maximal mdev for the last rtt period */ u32 rttvar_us; /* smoothed mdev_max */ u32 rto_us; /* Retransmission timeout in usec */ u8 backoff; /* Backoff timeout (as shift) */ u8 cong_ssthresh; /* Congestion slow-start threshold */ }; /* * Keys for matching a connection. */ struct rxrpc_conn_proto { union { struct { u32 epoch; /* epoch of this connection */ u32 cid; /* connection ID */ }; u64 index_key; }; }; struct rxrpc_conn_parameters { struct rxrpc_local *local; /* Representation of local endpoint */ struct rxrpc_peer *peer; /* Representation of remote endpoint */ struct key *key; /* Security details */ bool exclusive; /* T if conn is exclusive */ bool upgrade; /* T if service ID can be upgraded */ u16 service_id; /* Service ID for this connection */ u32 security_level; /* Security level selected */ }; /* * Call completion condition (state == RXRPC_CALL_COMPLETE). */ enum rxrpc_call_completion { RXRPC_CALL_SUCCEEDED, /* - Normal termination */ RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ NR__RXRPC_CALL_COMPLETIONS }; /* * Bits in the connection flags. */ enum rxrpc_conn_flag { RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */ RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */ RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */ RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */ RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */ RXRPC_CONN_FINAL_ACK_2, /* Need final ACK for channel 2 */ RXRPC_CONN_FINAL_ACK_3, /* Need final ACK for channel 3 */ }; #define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) | \ (1UL << RXRPC_CONN_FINAL_ACK_1) | \ (1UL << RXRPC_CONN_FINAL_ACK_2) | \ (1UL << RXRPC_CONN_FINAL_ACK_3)) /* * Events that can be raised upon a connection. */ enum rxrpc_conn_event { RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */ RXRPC_CONN_EV_ABORT_CALLS, /* Abort attached calls */ }; /* * The connection protocol state. */ enum rxrpc_conn_proto_state { RXRPC_CONN_UNUSED, /* Connection not yet attempted */ RXRPC_CONN_CLIENT_UNSECURED, /* Client connection needs security init */ RXRPC_CONN_CLIENT, /* Client connection */ RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */ RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */ RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */ RXRPC_CONN_SERVICE, /* Service secured connection */ RXRPC_CONN_ABORTED, /* Conn aborted */ RXRPC_CONN__NR_STATES }; /* * RxRPC client connection bundle. 
*/ struct rxrpc_bundle { struct rxrpc_local *local; /* Representation of local endpoint */ struct rxrpc_peer *peer; /* Remote endpoint */ struct key *key; /* Security details */ struct list_head proc_link; /* Link in net->bundle_proc_list */ const struct rxrpc_security *security; /* applied security module */ refcount_t ref; atomic_t active; /* Number of active users */ unsigned int debug_id; u32 security_level; /* Security level selected */ u16 service_id; /* Service ID for this connection */ bool try_upgrade; /* True if the bundle is attempting upgrade */ bool exclusive; /* T if conn is exclusive */ bool upgrade; /* T if service ID can be upgraded */ unsigned short alloc_error; /* Error from last conn allocation */ struct rb_node local_node; /* Node in local->client_conns */ struct list_head waiting_calls; /* Calls waiting for channels */ unsigned long avail_chans; /* Mask of available channels */ unsigned int conn_ids[4]; /* Connection IDs. */ struct rxrpc_connection *conns[4]; /* The connections in the bundle (max 4) */ }; /* * RxRPC connection definition * - matched by { local, peer, epoch, conn_id, direction } * - each connection can only handle four simultaneous calls */ struct rxrpc_connection { struct rxrpc_conn_proto proto; struct rxrpc_local *local; /* Representation of local endpoint */ struct rxrpc_peer *peer; /* Remote endpoint */ struct rxrpc_net *rxnet; /* Network namespace to which call belongs */ struct key *key; /* Security details */ struct list_head attend_link; /* Link in local->conn_attend_q */ refcount_t ref; atomic_t active; /* Active count for service conns */ struct rcu_head rcu; struct list_head cache_link; unsigned char act_chans; /* Mask of active channels */ struct rxrpc_channel { unsigned long final_ack_at; /* Time at which to issue final ACK */ struct rxrpc_call *call; /* Active call */ unsigned int call_debug_id; /* call->debug_id */ u32 call_id; /* ID of current call */ u32 call_counter; /* Call ID counter */ u32 last_call; /* ID of last call */ u8 last_type; /* Type of last packet */ union { u32 last_seq; u32 last_abort; }; } channels[RXRPC_MAXCALLS]; struct timer_list timer; /* Conn event timer */ struct work_struct processor; /* connection event processor */ struct work_struct destructor; /* In-process-context destroyer */ struct rxrpc_bundle *bundle; /* Client connection bundle */ struct rb_node service_node; /* Node in peer->service_conns */ struct list_head proc_link; /* link in procfs list */ struct list_head link; /* link in master connection list */ struct sk_buff_head rx_queue; /* received conn-level packets */ struct page_frag_cache tx_data_alloc; /* Tx DATA packet allocation */ struct mutex tx_data_alloc_lock; struct mutex security_lock; /* Lock for security management */ const struct rxrpc_security *security; /* applied security module */ union { struct { struct crypto_sync_skcipher *cipher; /* encryption handle */ struct rxrpc_crypt csum_iv; /* packet checksum base */ u32 nonce; /* response re-use preventer */ } rxkad; }; unsigned long flags; unsigned long events; unsigned long idle_timestamp; /* Time at which last became idle */ spinlock_t state_lock; /* state-change lock */ enum rxrpc_conn_proto_state state; /* current state of connection */ enum rxrpc_call_completion completion; /* Completion condition */ s32 abort_code; /* Abort code of connection abort */ int debug_id; /* debug ID for printks */ rxrpc_serial_t tx_serial; /* Outgoing packet serial number counter */ unsigned int hi_serial; /* highest serial number received */ u32 
service_id; /* Service ID, possibly upgraded */ u32 security_level; /* Security level selected */ u8 security_ix; /* security type */ u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */ u8 bundle_shift; /* Index into bundle->avail_chans */ bool exclusive; /* T if conn is exclusive */ bool upgrade; /* T if service ID can be upgraded */ u16 orig_service_id; /* Originally requested service ID */ short error; /* Local error code */ }; static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp) { return sp->hdr.flags & RXRPC_CLIENT_INITIATED; } static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp) { return !rxrpc_to_server(sp); } /* * Flags in call->flags. */ enum rxrpc_call_flag { RXRPC_CALL_RELEASED, /* call has been released - no more message to userspace */ RXRPC_CALL_HAS_USERID, /* has a user ID attached */ RXRPC_CALL_IS_SERVICE, /* Call is service call */ RXRPC_CALL_EXPOSED, /* The call was exposed to the world */ RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */ RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */ RXRPC_CALL_TX_ALL_ACKED, /* Last packet has been hard-acked */ RXRPC_CALL_SEND_PING, /* A ping will need to be sent */ RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */ RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */ RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */ RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */ RXRPC_CALL_KERNEL, /* The call was made by the kernel */ RXRPC_CALL_UPGRADE, /* Service upgrade was requested for the call */ RXRPC_CALL_EXCLUSIVE, /* The call uses a once-only connection */ RXRPC_CALL_RX_IS_IDLE, /* recvmsg() is idle - send an ACK */ RXRPC_CALL_RECVMSG_READ_ALL, /* recvmsg() read all of the received data */ }; /* * Events that can be raised on a call. */ enum rxrpc_call_event { RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */ RXRPC_CALL_EV_INITIAL_PING, /* Send initial ping for a new service call */ }; /* * The states that a call can be in. */ enum rxrpc_call_state { RXRPC_CALL_UNINITIALISED, RXRPC_CALL_CLIENT_AWAIT_CONN, /* - client waiting for connection to become available */ RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */ RXRPC_CALL_CLIENT_AWAIT_REPLY, /* - client awaiting reply */ RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */ RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */ RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */ RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */ RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */ RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */ RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */ RXRPC_CALL_COMPLETE, /* - call complete */ NR__RXRPC_CALL_STATES }; /* * Call Tx congestion management modes. 
*/ enum rxrpc_congest_mode { RXRPC_CALL_SLOW_START, RXRPC_CALL_CONGEST_AVOIDANCE, RXRPC_CALL_PACKET_LOSS, RXRPC_CALL_FAST_RETRANSMIT, NR__RXRPC_CONGEST_MODES }; /* * RxRPC call definition * - matched by { connection, call_id } */ struct rxrpc_call { struct rcu_head rcu; struct rxrpc_connection *conn; /* connection carrying call */ struct rxrpc_bundle *bundle; /* Connection bundle to use */ struct rxrpc_peer *peer; /* Peer record for remote address */ struct rxrpc_local *local; /* Representation of local endpoint */ struct rxrpc_sock __rcu *socket; /* socket responsible */ struct rxrpc_net *rxnet; /* Network namespace to which call belongs */ struct key *key; /* Security details */ const struct rxrpc_security *security; /* applied security module */ struct mutex user_mutex; /* User access mutex */ struct sockaddr_rxrpc dest_srx; /* Destination address */ ktime_t delay_ack_at; /* When DELAY ACK needs to happen */ ktime_t ack_lost_at; /* When ACK is figured as lost */ ktime_t resend_at; /* When next resend needs to happen */ ktime_t ping_at; /* When next to send a ping */ ktime_t keepalive_at; /* When next to send a keepalive ping */ ktime_t expect_rx_by; /* When we expect to get a packet by */ ktime_t expect_req_by; /* When we expect to get a request DATA packet by */ ktime_t expect_term_by; /* When we expect call termination by */ u32 next_rx_timo; /* Timeout for next Rx packet (ms) */ u32 next_req_timo; /* Timeout for next Rx request packet (ms) */ u32 hard_timo; /* Maximum lifetime or 0 (s) */ struct timer_list timer; /* Combined event timer */ struct work_struct destroyer; /* In-process-context destroyer */ rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */ struct list_head link; /* link in master call list */ struct list_head wait_link; /* Link in local->new_client_calls */ struct hlist_node error_link; /* link in error distribution list */ struct list_head accept_link; /* Link in rx->acceptq */ struct list_head recvmsg_link; /* Link in rx->recvmsg_q */ struct list_head sock_link; /* Link in rx->sock_calls */ struct rb_node sock_node; /* Node in rx->calls */ struct list_head attend_link; /* Link in local->call_attend_q */ struct rxrpc_txbuf *tx_pending; /* Tx buffer being filled */ wait_queue_head_t waitq; /* Wait queue for channel or Tx */ s64 tx_total_len; /* Total length left to be transmitted (or -1) */ unsigned long user_call_ID; /* user-defined call ID */ unsigned long flags; unsigned long events; spinlock_t notify_lock; /* Kernel notification lock */ unsigned int send_abort_why; /* Why the abort [enum rxrpc_abort_reason] */ s32 send_abort; /* Abort code to be sent */ short send_abort_err; /* Error to be associated with the abort */ rxrpc_seq_t send_abort_seq; /* DATA packet that incurred the abort (or 0) */ s32 abort_code; /* Local/remote abort code */ int error; /* Local error incurred */ enum rxrpc_call_state _state; /* Current state of call (needs barrier) */ enum rxrpc_call_completion completion; /* Call completion condition */ refcount_t ref; u8 security_ix; /* Security type */ enum rxrpc_interruptibility interruptibility; /* At what point call may be interrupted */ u32 call_id; /* call ID on connection */ u32 cid; /* connection ID plus channel index */ u32 security_level; /* Security level selected */ int debug_id; /* debug ID for printks */ unsigned short rx_pkt_offset; /* Current recvmsg packet offset */ unsigned short rx_pkt_len; /* Current recvmsg packet len */ /* Transmitted data tracking. 
*/ spinlock_t tx_lock; /* Transmit queue lock */ struct list_head tx_sendmsg; /* Sendmsg prepared packets */ struct list_head tx_buffer; /* Buffer of transmissible packets */ rxrpc_seq_t tx_bottom; /* First packet in buffer */ rxrpc_seq_t tx_transmitted; /* Highest packet transmitted */ rxrpc_seq_t tx_prepared; /* Highest Tx slot prepared. */ rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */ u16 tx_backoff; /* Delay to insert due to Tx failure (ms) */ u8 tx_winsize; /* Maximum size of Tx window */ #define RXRPC_TX_MAX_WINDOW 128 ktime_t tx_last_sent; /* Last time a transmission occurred */ /* Received data tracking */ struct sk_buff_head recvmsg_queue; /* Queue of packets ready for recvmsg() */ struct sk_buff_head rx_oos_queue; /* Queue of out of sequence packets */ rxrpc_seq_t rx_highest_seq; /* Higest sequence number received */ rxrpc_seq_t rx_consumed; /* Highest packet consumed */ rxrpc_serial_t rx_serial; /* Highest serial received for this call */ u8 rx_winsize; /* Size of Rx window */ /* TCP-style slow-start congestion control [RFC5681]. Since the SMSS * is fixed, we keep these numbers in terms of segments (ie. DATA * packets) rather than bytes. */ #define RXRPC_TX_SMSS RXRPC_JUMBO_DATALEN #define RXRPC_MIN_CWND 4 u8 cong_cwnd; /* Congestion window size */ u8 cong_extra; /* Extra to send for congestion management */ u8 cong_ssthresh; /* Slow-start threshold */ enum rxrpc_congest_mode cong_mode:8; /* Congestion management mode */ u8 cong_dup_acks; /* Count of ACKs showing missing packets */ u8 cong_cumul_acks; /* Cumulative ACK count */ ktime_t cong_tstamp; /* Last time cwnd was changed */ struct sk_buff *cong_last_nack; /* Last ACK with nacks received */ /* Receive-phase ACK management (ACKs we send). */ u8 ackr_reason; /* reason to ACK */ u16 ackr_sack_base; /* Starting slot in SACK table ring */ rxrpc_seq_t ackr_window; /* Base of SACK window */ rxrpc_seq_t ackr_wtop; /* Base of SACK window */ unsigned int ackr_nr_unacked; /* Number of unacked packets */ atomic_t ackr_nr_consumed; /* Number of packets needing hard ACK */ struct { #define RXRPC_SACK_SIZE 256 /* SACK table for soft-acked packets */ u8 ackr_sack_table[RXRPC_SACK_SIZE]; } __aligned(8); /* RTT management */ rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */ ktime_t rtt_sent_at[4]; /* Time packet sent */ unsigned long rtt_avail; /* Mask of available slots in bits 0-3, * Mask of pending samples in 8-11 */ #define RXRPC_CALL_RTT_AVAIL_MASK 0xf #define RXRPC_CALL_RTT_PEND_SHIFT 8 /* Transmission-phase ACK management (ACKs we've received). */ ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ rxrpc_seq_t acks_first_seq; /* first sequence number received */ rxrpc_seq_t acks_prev_seq; /* Highest previousPacket received */ rxrpc_seq_t acks_hard_ack; /* Latest hard-ack point */ rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */ rxrpc_serial_t acks_highest_serial; /* Highest serial number ACK'd */ }; /* * Summary of a new ACK and the changes it made to the Tx buffer packet states. 
*/ struct rxrpc_ack_summary { u16 nr_acks; /* Number of ACKs in packet */ u16 nr_new_acks; /* Number of new ACKs in packet */ u16 nr_new_nacks; /* Number of new nacks in packet */ u16 nr_retained_nacks; /* Number of nacks retained between ACKs */ u8 ack_reason; bool saw_nacks; /* Saw NACKs in packet */ bool new_low_nack; /* T if new low NACK found */ bool retrans_timeo; /* T if reTx due to timeout happened */ u8 flight_size; /* Number of unreceived transmissions */ /* Place to stash values for tracing */ enum rxrpc_congest_mode mode:8; u8 cwnd; u8 ssthresh; u8 dup_acks; u8 cumulative_acks; }; /* * sendmsg() cmsg-specified parameters. */ enum rxrpc_command { RXRPC_CMD_SEND_DATA, /* send data message */ RXRPC_CMD_SEND_ABORT, /* request abort generation */ RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ RXRPC_CMD_CHARGE_ACCEPT, /* [server] charge accept preallocation */ }; struct rxrpc_call_params { s64 tx_total_len; /* Total Tx data length (if send data) */ unsigned long user_call_ID; /* User's call ID */ struct { u32 hard; /* Maximum lifetime (sec) */ u32 idle; /* Max time since last data packet (msec) */ u32 normal; /* Max time since last call packet (msec) */ } timeouts; u8 nr_timeouts; /* Number of timeouts specified */ bool kernel; /* T if kernel is making the call */ enum rxrpc_interruptibility interruptibility; /* How is interruptible is the call? */ }; struct rxrpc_send_params { struct rxrpc_call_params call; u32 abort_code; /* Abort code to Tx (if abort) */ enum rxrpc_command command : 8; /* The command to implement */ bool exclusive; /* Shared or exclusive call */ bool upgrade; /* If the connection is upgradeable */ }; /* * Buffer of data to be output as a packet. */ struct rxrpc_txbuf { struct list_head call_link; /* Link in call->tx_sendmsg/tx_buffer */ struct list_head tx_link; /* Link in live Enc queue or Tx queue */ ktime_t last_sent; /* Time at which last transmitted */ refcount_t ref; rxrpc_seq_t seq; /* Sequence number of this packet */ rxrpc_serial_t serial; /* Last serial number transmitted with */ unsigned int call_debug_id; unsigned int debug_id; unsigned int len; /* Amount of data in buffer */ unsigned int space; /* Remaining data space */ unsigned int offset; /* Offset of fill point */ unsigned int flags; #define RXRPC_TXBUF_WIRE_FLAGS 0xff /* The wire protocol flags */ #define RXRPC_TXBUF_RESENT 0x100 /* Set if has been resent */ __be16 cksum; /* Checksum to go in header */ unsigned short ack_rwind; /* ACK receive window */ u8 /*enum rxrpc_propose_ack_trace*/ ack_why; /* If ack, why */ u8 nr_kvec; /* Amount of kvec[] used */ struct kvec kvec[3]; }; static inline bool rxrpc_sending_to_server(const struct rxrpc_txbuf *txb) { return txb->flags & RXRPC_CLIENT_INITIATED; } static inline bool rxrpc_sending_to_client(const struct rxrpc_txbuf *txb) { return !rxrpc_sending_to_server(txb); } #include <trace/events/rxrpc.h> /* * Allocate the next serial number on a connection. 0 must be skipped. 
*/ static inline rxrpc_serial_t rxrpc_get_next_serial(struct rxrpc_connection *conn) { rxrpc_serial_t serial; serial = conn->tx_serial; if (serial == 0) serial = 1; conn->tx_serial = serial + 1; return serial; } /* * af_rxrpc.c */ extern atomic_t rxrpc_n_rx_skbs; extern struct workqueue_struct *rxrpc_workqueue; /* * call_accept.c */ int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t); void rxrpc_discard_prealloc(struct rxrpc_sock *); bool rxrpc_new_incoming_call(struct rxrpc_local *local, struct rxrpc_peer *peer, struct rxrpc_connection *conn, struct sockaddr_rxrpc *peer_srx, struct sk_buff *skb); int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long); /* * call_event.c */ void rxrpc_propose_ping(struct rxrpc_call *call, u32 serial, enum rxrpc_propose_ack_trace why); void rxrpc_propose_delay_ACK(struct rxrpc_call *, rxrpc_serial_t, enum rxrpc_propose_ack_trace); void rxrpc_shrink_call_tx_buffer(struct rxrpc_call *); void rxrpc_resend(struct rxrpc_call *call, struct sk_buff *ack_skb); bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb); /* * call_object.c */ extern const char *const rxrpc_call_states[]; extern const char *const rxrpc_call_completions[]; extern struct kmem_cache *rxrpc_call_jar; void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what); struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t, unsigned int); struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, struct rxrpc_conn_parameters *, struct rxrpc_call_params *, gfp_t, unsigned int); void rxrpc_start_call_timer(struct rxrpc_call *call); void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, struct sk_buff *); void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); void rxrpc_release_calls_on_socket(struct rxrpc_sock *); void rxrpc_see_call(struct rxrpc_call *, enum rxrpc_call_trace); struct rxrpc_call *rxrpc_try_get_call(struct rxrpc_call *, enum rxrpc_call_trace); void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace); void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace); void rxrpc_cleanup_call(struct rxrpc_call *); void rxrpc_destroy_all_calls(struct rxrpc_net *); static inline bool rxrpc_is_service_call(const struct rxrpc_call *call) { return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags); } static inline bool rxrpc_is_client_call(const struct rxrpc_call *call) { return !rxrpc_is_service_call(call); } /* * call_state.c */ bool rxrpc_set_call_completion(struct rxrpc_call *call, enum rxrpc_call_completion compl, u32 abort_code, int error); bool rxrpc_call_completed(struct rxrpc_call *call); bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq, u32 abort_code, int error, enum rxrpc_abort_reason why); void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl, int error); static inline void rxrpc_set_call_state(struct rxrpc_call *call, enum rxrpc_call_state state) { /* Order write of completion info before write of ->state. 
*/ smp_store_release(&call->_state, state); wake_up(&call->waitq); } static inline enum rxrpc_call_state __rxrpc_call_state(const struct rxrpc_call *call) { return call->_state; /* Only inside I/O thread */ } static inline bool __rxrpc_call_is_complete(const struct rxrpc_call *call) { return __rxrpc_call_state(call) == RXRPC_CALL_COMPLETE; } static inline enum rxrpc_call_state rxrpc_call_state(const struct rxrpc_call *call) { /* Order read ->state before read of completion info. */ return smp_load_acquire(&call->_state); } static inline bool rxrpc_call_is_complete(const struct rxrpc_call *call) { return rxrpc_call_state(call) == RXRPC_CALL_COMPLETE; } static inline bool rxrpc_call_has_failed(const struct rxrpc_call *call) { return rxrpc_call_is_complete(call) && call->completion != RXRPC_CALL_SUCCEEDED; } /* * conn_client.c */ extern unsigned int rxrpc_reap_client_connections; extern unsigned long rxrpc_conn_idle_client_expiry; extern unsigned long rxrpc_conn_idle_client_fast_expiry; void rxrpc_purge_client_connections(struct rxrpc_local *local); struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace); void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace); int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp); void rxrpc_connect_client_calls(struct rxrpc_local *local); void rxrpc_expose_client_call(struct rxrpc_call *); void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *); void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle); void rxrpc_discard_expired_client_conns(struct rxrpc_local *local); void rxrpc_clean_up_local_conns(struct rxrpc_local *); /* * conn_event.c */ void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct sk_buff *skb, unsigned int channel); int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb, s32 abort_code, int err, enum rxrpc_abort_reason why); void rxrpc_process_connection(struct work_struct *); void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool); bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb); void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb); static inline bool rxrpc_is_conn_aborted(const struct rxrpc_connection *conn) { /* Order reading the abort info after the state check. 
*/ return smp_load_acquire(&conn->state) == RXRPC_CONN_ABORTED; } /* * conn_object.c */ extern unsigned int rxrpc_connection_expiry; extern unsigned int rxrpc_closed_conn_expiry; void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why); struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t); struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *, struct sockaddr_rxrpc *, struct sk_buff *); void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *); void rxrpc_disconnect_call(struct rxrpc_call *); void rxrpc_kill_client_conn(struct rxrpc_connection *); void rxrpc_queue_conn(struct rxrpc_connection *, enum rxrpc_conn_trace); void rxrpc_see_connection(struct rxrpc_connection *, enum rxrpc_conn_trace); struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *, enum rxrpc_conn_trace); struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *, enum rxrpc_conn_trace); void rxrpc_put_connection(struct rxrpc_connection *, enum rxrpc_conn_trace); void rxrpc_service_connection_reaper(struct work_struct *); void rxrpc_destroy_all_connections(struct rxrpc_net *); static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn) { return conn->out_clientflag; } static inline bool rxrpc_conn_is_service(const struct rxrpc_connection *conn) { return !rxrpc_conn_is_client(conn); } static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn, unsigned long expire_at) { timer_reduce(&conn->timer, expire_at); } /* * conn_service.c */ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *, struct sk_buff *); struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t); void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *, const struct rxrpc_security *, struct sk_buff *); void rxrpc_unpublish_service_conn(struct rxrpc_connection *); /* * input.c */ void rxrpc_congestion_degrade(struct rxrpc_call *); void rxrpc_input_call_packet(struct rxrpc_call *, struct sk_buff *); void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *); /* * io_thread.c */ int rxrpc_encap_rcv(struct sock *, struct sk_buff *); void rxrpc_error_report(struct sock *); bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why, s32 abort_code, int err); int rxrpc_io_thread(void *data); static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local) { wake_up_process(READ_ONCE(local->io_thread)); } static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why) { return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EPROTO); } /* * insecure.c */ extern const struct rxrpc_security rxrpc_no_security; /* * key.c */ extern struct key_type key_type_rxrpc; int rxrpc_request_key(struct rxrpc_sock *, sockptr_t , int); int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t, u32); /* * local_event.c */ void rxrpc_gen_version_string(void); void rxrpc_send_version_request(struct rxrpc_local *local, struct rxrpc_host_header *hdr, struct sk_buff *skb); /* * local_object.c */ void rxrpc_local_dont_fragment(const struct rxrpc_local *local, bool set); struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc *); struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *, enum rxrpc_local_trace); struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *, enum rxrpc_local_trace); void rxrpc_put_local(struct rxrpc_local *, enum rxrpc_local_trace); 
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *, enum rxrpc_local_trace); void rxrpc_unuse_local(struct rxrpc_local *, enum rxrpc_local_trace); void rxrpc_destroy_local(struct rxrpc_local *local); void rxrpc_destroy_all_locals(struct rxrpc_net *); static inline bool __rxrpc_use_local(struct rxrpc_local *local, enum rxrpc_local_trace why) { int r, u; r = refcount_read(&local->ref); u = atomic_fetch_add_unless(&local->active_users, 1, 0); trace_rxrpc_local(local->debug_id, why, r, u); return u != 0; } static inline void rxrpc_see_local(struct rxrpc_local *local, enum rxrpc_local_trace why) { int r, u; r = refcount_read(&local->ref); u = atomic_read(&local->active_users); trace_rxrpc_local(local->debug_id, why, r, u); } /* * misc.c */ extern unsigned int rxrpc_max_backlog __read_mostly; extern unsigned long rxrpc_soft_ack_delay; extern unsigned long rxrpc_idle_ack_delay; extern unsigned int rxrpc_rx_window_size; extern unsigned int rxrpc_rx_mtu; extern unsigned int rxrpc_rx_jumbo_max; #ifdef CONFIG_AF_RXRPC_INJECT_RX_DELAY extern unsigned long rxrpc_inject_rx_delay; #endif /* * net_ns.c */ extern unsigned int rxrpc_net_id; extern struct pernet_operations rxrpc_net_ops; static inline struct rxrpc_net *rxrpc_net(struct net *net) { return net_generic(net, rxrpc_net_id); } /* * output.c */ void rxrpc_send_ACK(struct rxrpc_call *call, u8 ack_reason, rxrpc_serial_t serial, enum rxrpc_propose_ack_trace why); int rxrpc_send_abort_packet(struct rxrpc_call *); void rxrpc_send_conn_abort(struct rxrpc_connection *conn); void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb); void rxrpc_send_keepalive(struct rxrpc_peer *); void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb); /* * peer_event.c */ void rxrpc_input_error(struct rxrpc_local *, struct sk_buff *); void rxrpc_peer_keepalive_worker(struct work_struct *); /* * peer_object.c */ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *, const struct sockaddr_rxrpc *); struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, struct sockaddr_rxrpc *srx, gfp_t gfp); struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t, enum rxrpc_peer_trace); void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer); void rxrpc_destroy_all_peers(struct rxrpc_net *); struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace); struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace); void rxrpc_put_peer(struct rxrpc_peer *, enum rxrpc_peer_trace); /* * proc.c */ extern const struct seq_operations rxrpc_call_seq_ops; extern const struct seq_operations rxrpc_connection_seq_ops; extern const struct seq_operations rxrpc_bundle_seq_ops; extern const struct seq_operations rxrpc_peer_seq_ops; extern const struct seq_operations rxrpc_local_seq_ops; /* * recvmsg.c */ void rxrpc_notify_socket(struct rxrpc_call *); int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); /* * Abort a call due to a protocol error. 
*/ static inline int rxrpc_abort_eproto(struct rxrpc_call *call, struct sk_buff *skb, s32 abort_code, enum rxrpc_abort_reason why) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); rxrpc_abort_call(call, sp->hdr.seq, abort_code, -EPROTO, why); return -EPROTO; } /* * rtt.c */ void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int, rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); ktime_t rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans); void rxrpc_peer_init_rtt(struct rxrpc_peer *); /* * rxkad.c */ #ifdef CONFIG_RXKAD extern const struct rxrpc_security rxkad; #endif /* * security.c */ int __init rxrpc_init_security(void); const struct rxrpc_security *rxrpc_security_lookup(u8); void rxrpc_exit_security(void); int rxrpc_init_client_call_security(struct rxrpc_call *); int rxrpc_init_client_conn_security(struct rxrpc_connection *); const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *, struct sk_buff *); struct key *rxrpc_look_up_server_security(struct rxrpc_connection *, struct sk_buff *, u32, u32); /* * sendmsg.c */ bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error, enum rxrpc_abort_reason why); int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t); /* * server_key.c */ extern struct key_type key_type_rxrpc_s; int rxrpc_server_keyring(struct rxrpc_sock *, sockptr_t, int); /* * skbuff.c */ void rxrpc_kernel_data_consumed(struct rxrpc_call *, struct sk_buff *); void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); void rxrpc_purge_queue(struct sk_buff_head *); /* * stats.c */ int rxrpc_stats_show(struct seq_file *seq, void *v); int rxrpc_stats_clear(struct file *file, char *buf, size_t size); #define rxrpc_inc_stat(rxnet, s) atomic_inc(&(rxnet)->s) #define rxrpc_dec_stat(rxnet, s) atomic_dec(&(rxnet)->s) /* * sysctl.c */ #ifdef CONFIG_SYSCTL extern int __init rxrpc_sysctl_init(void); extern void rxrpc_sysctl_exit(void); #else static inline int __init rxrpc_sysctl_init(void) { return 0; } static inline void rxrpc_sysctl_exit(void) {} #endif /* * txbuf.c */ extern atomic_t rxrpc_nr_txbuf; struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_size, size_t data_align, gfp_t gfp); struct rxrpc_txbuf *rxrpc_alloc_ack_txbuf(struct rxrpc_call *call, size_t sack_size); void rxrpc_get_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what); void rxrpc_see_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what); void rxrpc_put_txbuf(struct rxrpc_txbuf *txb, enum rxrpc_txbuf_trace what); /* * utils.c */ int rxrpc_extract_addr_from_skb(struct sockaddr_rxrpc *, struct sk_buff *); static inline bool before(u32 seq1, u32 seq2) { return (s32)(seq1 - seq2) < 0; } static inline bool before_eq(u32 seq1, u32 seq2) { return (s32)(seq1 - seq2) <= 0; } static inline bool after(u32 seq1, u32 seq2) { return (s32)(seq1 - seq2) > 0; } static inline bool after_eq(u32 seq1, u32 seq2) { return (s32)(seq1 - seq2) >= 0; } /* * debug tracing */ extern unsigned int rxrpc_debug; #define dbgprintk(FMT,...) \ printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) #define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__) #define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__) #define kdebug(FMT,...) 
dbgprintk(" "FMT ,##__VA_ARGS__) #if defined(__KDEBUG) #define _enter(FMT,...) kenter(FMT,##__VA_ARGS__) #define _leave(FMT,...) kleave(FMT,##__VA_ARGS__) #define _debug(FMT,...) kdebug(FMT,##__VA_ARGS__) #elif defined(CONFIG_AF_RXRPC_DEBUG) #define RXRPC_DEBUG_KENTER 0x01 #define RXRPC_DEBUG_KLEAVE 0x02 #define RXRPC_DEBUG_KDEBUG 0x04 #define _enter(FMT,...) \ do { \ if (unlikely(rxrpc_debug & RXRPC_DEBUG_KENTER)) \ kenter(FMT,##__VA_ARGS__); \ } while (0) #define _leave(FMT,...) \ do { \ if (unlikely(rxrpc_debug & RXRPC_DEBUG_KLEAVE)) \ kleave(FMT,##__VA_ARGS__); \ } while (0) #define _debug(FMT,...) \ do { \ if (unlikely(rxrpc_debug & RXRPC_DEBUG_KDEBUG)) \ kdebug(FMT,##__VA_ARGS__); \ } while (0) #else #define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__) #define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__) #define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__) #endif /* * debug assertion checking */ #if 1 // defined(__KDEBUGALL) #define ASSERT(X) \ do { \ if (unlikely(!(X))) { \ pr_err("Assertion failed\n"); \ BUG(); \ } \ } while (0) #define ASSERTCMP(X, OP, Y) \ do { \ __typeof__(X) _x = (X); \ __typeof__(Y) _y = (__typeof__(X))(Y); \ if (unlikely(!(_x OP _y))) { \ pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \ (unsigned long)_x, (unsigned long)_x, #OP, \ (unsigned long)_y, (unsigned long)_y); \ BUG(); \ } \ } while (0) #define ASSERTIF(C, X) \ do { \ if (unlikely((C) && !(X))) { \ pr_err("Assertion failed\n"); \ BUG(); \ } \ } while (0) #define ASSERTIFCMP(C, X, OP, Y) \ do { \ __typeof__(X) _x = (X); \ __typeof__(Y) _y = (__typeof__(X))(Y); \ if (unlikely((C) && !(_x OP _y))) { \ pr_err("Assertion failed - %lu(0x%lx) %s %lu(0x%lx) is false\n", \ (unsigned long)_x, (unsigned long)_x, #OP, \ (unsigned long)_y, (unsigned long)_y); \ BUG(); \ } \ } while (0) #else #define ASSERT(X) \ do { \ } while (0) #define ASSERTCMP(X, OP, Y) \ do { \ } while (0) #define ASSERTIF(C, X) \ do { \ } while (0) #define ASSERTIFCMP(C, X, OP, Y) \ do { \ } while (0) #endif /* __KDEBUGALL */
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. */ #include "xfs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_sb.h" #include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_bmap.h" #include "xfs_alloc.h" #include "xfs_fsops.h" #include "xfs_trans.h" #include "xfs_buf_item.h" #include "xfs_log.h" #include "xfs_log_priv.h" #include "xfs_dir2.h" #include "xfs_extfree_item.h" #include "xfs_mru_cache.h" #include "xfs_inode_item.h" #include "xfs_icache.h" #include "xfs_trace.h" #include "xfs_icreate_item.h" #include "xfs_filestream.h" #include "xfs_quota.h" #include "xfs_sysfs.h" #include "xfs_ondisk.h" #include "xfs_rmap_item.h" #include "xfs_refcount_item.h" #include "xfs_bmap_item.h" #include "xfs_reflink.h" #include "xfs_pwork.h" #include "xfs_ag.h" #include "xfs_defer.h" #include "xfs_attr_item.h" #include "xfs_xattr.h" #include "xfs_iunlink_item.h" #include "xfs_dahash_test.h" #include "xfs_rtbitmap.h" #include "xfs_exchmaps_item.h" #include "xfs_parent.h" #include "xfs_rtalloc.h" #include "scrub/stats.h" #include "scrub/rcbag_btree.h" #include <linux/magic.h> #include <linux/fs_context.h> #include <linux/fs_parser.h> static const struct super_operations xfs_super_operations; static struct dentry *xfs_debugfs; /* top-level xfs debugfs dir */ static struct kset *xfs_kset; /* top-level xfs sysfs dir */ #ifdef DEBUG static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */ #endif enum xfs_dax_mode { XFS_DAX_INODE = 0, XFS_DAX_ALWAYS = 1, XFS_DAX_NEVER = 2, }; /* Were quota mount options provided? Must use the upper 16 bits of qflags. */ #define XFS_QFLAGS_MNTOPTS (1U << 31) static void xfs_mount_set_dax_mode( struct xfs_mount *mp, enum xfs_dax_mode mode) { switch (mode) { case XFS_DAX_INODE: mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER); break; case XFS_DAX_ALWAYS: mp->m_features |= XFS_FEAT_DAX_ALWAYS; mp->m_features &= ~XFS_FEAT_DAX_NEVER; break; case XFS_DAX_NEVER: mp->m_features |= XFS_FEAT_DAX_NEVER; mp->m_features &= ~XFS_FEAT_DAX_ALWAYS; break; } } static const struct constant_table dax_param_enums[] = { {"inode", XFS_DAX_INODE }, {"always", XFS_DAX_ALWAYS }, {"never", XFS_DAX_NEVER }, {} }; /* * Table driven mount option parser.
*/ enum { Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid, Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups, Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep, Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota, Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce, Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum, }; static const struct fs_parameter_spec xfs_fs_parameters[] = { fsparam_u32("logbufs", Opt_logbufs), fsparam_string("logbsize", Opt_logbsize), fsparam_string("logdev", Opt_logdev), fsparam_string("rtdev", Opt_rtdev), fsparam_flag("wsync", Opt_wsync), fsparam_flag("noalign", Opt_noalign), fsparam_flag("swalloc", Opt_swalloc), fsparam_u32("sunit", Opt_sunit), fsparam_u32("swidth", Opt_swidth), fsparam_flag("nouuid", Opt_nouuid), fsparam_flag("grpid", Opt_grpid), fsparam_flag("nogrpid", Opt_nogrpid), fsparam_flag("bsdgroups", Opt_bsdgroups), fsparam_flag("sysvgroups", Opt_sysvgroups), fsparam_string("allocsize", Opt_allocsize), fsparam_flag("norecovery", Opt_norecovery), fsparam_flag("inode64", Opt_inode64), fsparam_flag("inode32", Opt_inode32), fsparam_flag("ikeep", Opt_ikeep), fsparam_flag("noikeep", Opt_noikeep), fsparam_flag("largeio", Opt_largeio), fsparam_flag("nolargeio", Opt_nolargeio), fsparam_flag("attr2", Opt_attr2), fsparam_flag("noattr2", Opt_noattr2), fsparam_flag("filestreams", Opt_filestreams), fsparam_flag("quota", Opt_quota), fsparam_flag("noquota", Opt_noquota), fsparam_flag("usrquota", Opt_usrquota), fsparam_flag("grpquota", Opt_grpquota), fsparam_flag("prjquota", Opt_prjquota), fsparam_flag("uquota", Opt_uquota), fsparam_flag("gquota", Opt_gquota), fsparam_flag("pquota", Opt_pquota), fsparam_flag("uqnoenforce", Opt_uqnoenforce), fsparam_flag("gqnoenforce", Opt_gqnoenforce), fsparam_flag("pqnoenforce", Opt_pqnoenforce), fsparam_flag("qnoenforce", Opt_qnoenforce), fsparam_flag("discard", Opt_discard), fsparam_flag("nodiscard", Opt_nodiscard), fsparam_flag("dax", Opt_dax), fsparam_enum("dax", Opt_dax_enum, dax_param_enums), {} }; struct proc_xfs_info { uint64_t flag; char *str; }; static int xfs_fs_show_options( struct seq_file *m, struct dentry *root) { static struct proc_xfs_info xfs_info_set[] = { /* the few simple ones we can get from the mount struct */ { XFS_FEAT_IKEEP, ",ikeep" }, { XFS_FEAT_WSYNC, ",wsync" }, { XFS_FEAT_NOALIGN, ",noalign" }, { XFS_FEAT_SWALLOC, ",swalloc" }, { XFS_FEAT_NOUUID, ",nouuid" }, { XFS_FEAT_NORECOVERY, ",norecovery" }, { XFS_FEAT_ATTR2, ",attr2" }, { XFS_FEAT_FILESTREAMS, ",filestreams" }, { XFS_FEAT_GRPID, ",grpid" }, { XFS_FEAT_DISCARD, ",discard" }, { XFS_FEAT_LARGE_IOSIZE, ",largeio" }, { XFS_FEAT_DAX_ALWAYS, ",dax=always" }, { XFS_FEAT_DAX_NEVER, ",dax=never" }, { 0, NULL } }; struct xfs_mount *mp = XFS_M(root->d_sb); struct proc_xfs_info *xfs_infop; for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) { if (mp->m_features & xfs_infop->flag) seq_puts(m, xfs_infop->str); } seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 
32 : 64); if (xfs_has_allocsize(mp)) seq_printf(m, ",allocsize=%dk", (1 << mp->m_allocsize_log) >> 10); if (mp->m_logbufs > 0) seq_printf(m, ",logbufs=%d", mp->m_logbufs); if (mp->m_logbsize > 0) seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10); if (mp->m_logname) seq_show_option(m, "logdev", mp->m_logname); if (mp->m_rtname) seq_show_option(m, "rtdev", mp->m_rtname); if (mp->m_dalign > 0) seq_printf(m, ",sunit=%d", (int)XFS_FSB_TO_BB(mp, mp->m_dalign)); if (mp->m_swidth > 0) seq_printf(m, ",swidth=%d", (int)XFS_FSB_TO_BB(mp, mp->m_swidth)); if (mp->m_qflags & XFS_UQUOTA_ENFD) seq_puts(m, ",usrquota"); else if (mp->m_qflags & XFS_UQUOTA_ACCT) seq_puts(m, ",uqnoenforce"); if (mp->m_qflags & XFS_PQUOTA_ENFD) seq_puts(m, ",prjquota"); else if (mp->m_qflags & XFS_PQUOTA_ACCT) seq_puts(m, ",pqnoenforce"); if (mp->m_qflags & XFS_GQUOTA_ENFD) seq_puts(m, ",grpquota"); else if (mp->m_qflags & XFS_GQUOTA_ACCT) seq_puts(m, ",gqnoenforce"); if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT)) seq_puts(m, ",noquota"); return 0; } static bool xfs_set_inode_alloc_perag( struct xfs_perag *pag, xfs_ino_t ino, xfs_agnumber_t max_metadata) { if (!xfs_is_inode32(pag_mount(pag))) { set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); return false; } if (ino > XFS_MAXINUMBER_32) { clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); return false; } set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate); if (pag_agno(pag) < max_metadata) set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); else clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate); return true; } /* * Set parameters for inode allocation heuristics, taking into account * filesystem size and inode32/inode64 mount options; i.e. specifically * whether or not XFS_FEAT_SMALL_INUMS is set. * * Inode allocation patterns are altered only if inode32 is requested * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large. * If altered, XFS_OPSTATE_INODE32 is set as well. * * An agcount independent of that in the mount structure is provided * because in the growfs case, mp->m_sb.sb_agcount is not yet updated * to the potentially higher ag count. * * Returns the maximum AG index which may contain inodes. */ xfs_agnumber_t xfs_set_inode_alloc( struct xfs_mount *mp, xfs_agnumber_t agcount) { xfs_agnumber_t index; xfs_agnumber_t maxagi = 0; xfs_sb_t *sbp = &mp->m_sb; xfs_agnumber_t max_metadata; xfs_agino_t agino; xfs_ino_t ino; /* * Calculate how much should be reserved for inodes to meet * the max inode percentage. Used only for inode32. */ if (M_IGEO(mp)->maxicount) { uint64_t icount; icount = sbp->sb_dblocks * sbp->sb_imax_pct; do_div(icount, 100); icount += sbp->sb_agblocks - 1; do_div(icount, sbp->sb_agblocks); max_metadata = icount; } else { max_metadata = agcount; } /* Get the last possible inode in the filesystem */ agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1); ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino); /* * If user asked for no more than 32-bit inodes, and the fs is * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter * the allocator to accommodate the request. 
*/ if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32) xfs_set_inode32(mp); else xfs_clear_inode32(mp); for (index = 0; index < agcount; index++) { struct xfs_perag *pag; ino = XFS_AGINO_TO_INO(mp, index, agino); pag = xfs_perag_get(mp, index); if (xfs_set_inode_alloc_perag(pag, ino, max_metadata)) maxagi++; xfs_perag_put(pag); } return xfs_is_inode32(mp) ? maxagi : agcount; } static int xfs_setup_dax_always( struct xfs_mount *mp) { if (!mp->m_ddev_targp->bt_daxdev && (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) { xfs_alert(mp, "DAX unsupported by block device. Turning off DAX."); goto disable_dax; } if (mp->m_super->s_blocksize != PAGE_SIZE) { xfs_alert(mp, "DAX not supported for blocksize. Turning off DAX."); goto disable_dax; } if (xfs_has_reflink(mp) && bdev_is_partition(mp->m_ddev_targp->bt_bdev)) { xfs_alert(mp, "DAX and reflink cannot work with multi-partitions!"); return -EINVAL; } return 0; disable_dax: xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER); return 0; } STATIC int xfs_blkdev_get( xfs_mount_t *mp, const char *name, struct file **bdev_filep) { int error = 0; *bdev_filep = bdev_file_open_by_path(name, BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES, mp->m_super, &fs_holder_ops); if (IS_ERR(*bdev_filep)) { error = PTR_ERR(*bdev_filep); *bdev_filep = NULL; xfs_warn(mp, "Invalid device [%s], error=%d", name, error); } return error; } STATIC void xfs_shutdown_devices( struct xfs_mount *mp) { /* * Udev is triggered whenever anyone closes a block device or unmounts * a file systemm on a block device. * The default udev rules invoke blkid to read the fs super and create * symlinks to the bdev under /dev/disk. For this, it uses buffered * reads through the page cache. * * xfs_db also uses buffered reads to examine metadata. There is no * coordination between xfs_db and udev, which means that they can run * concurrently. Note there is no coordination between the kernel and * blkid either. * * On a system with 64k pages, the page cache can cache the superblock * and the root inode (and hence the root directory) with the same 64k * page. If udev spawns blkid after the mkfs and the system is busy * enough that it is still running when xfs_db starts up, they'll both * read from the same page in the pagecache. * * The unmount writes updated inode metadata to disk directly. The XFS * buffer cache does not use the bdev pagecache, so it needs to * invalidate that pagecache on unmount. If the above scenario occurs, * the pagecache no longer reflects what's on disk, xfs_db reads the * stale metadata, and fails to find /a. Most of the time this succeeds * because closing a bdev invalidates the page cache, but when processes * race, everyone loses. */ if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { blkdev_issue_flush(mp->m_logdev_targp->bt_bdev); invalidate_bdev(mp->m_logdev_targp->bt_bdev); } if (mp->m_rtdev_targp) { blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev); invalidate_bdev(mp->m_rtdev_targp->bt_bdev); } blkdev_issue_flush(mp->m_ddev_targp->bt_bdev); invalidate_bdev(mp->m_ddev_targp->bt_bdev); } /* * The file system configurations are: * (1) device (partition) with data and internal log * (2) logical volume with data and log subvolumes. * (3) logical volume with data, log, and realtime subvolumes. * * We only have to handle opening the log and realtime volumes here if * they are present. The data subvolume has already been opened by * get_sb_bdev() and is stored in sb->s_bdev. 
*/ STATIC int xfs_open_devices( struct xfs_mount *mp) { struct super_block *sb = mp->m_super; struct block_device *ddev = sb->s_bdev; struct file *logdev_file = NULL, *rtdev_file = NULL; int error; /* * Open real time and log devices - order is important. */ if (mp->m_logname) { error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file); if (error) return error; } if (mp->m_rtname) { error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file); if (error) goto out_close_logdev; if (file_bdev(rtdev_file) == ddev || (logdev_file && file_bdev(rtdev_file) == file_bdev(logdev_file))) { xfs_warn(mp, "Cannot mount filesystem with identical rtdev and ddev/logdev."); error = -EINVAL; goto out_close_rtdev; } } /* * Setup xfs_mount buffer target pointers */ error = -ENOMEM; mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file); if (!mp->m_ddev_targp) goto out_close_rtdev; if (rtdev_file) { mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file); if (!mp->m_rtdev_targp) goto out_free_ddev_targ; } if (logdev_file && file_bdev(logdev_file) != ddev) { mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file); if (!mp->m_logdev_targp) goto out_free_rtdev_targ; } else { mp->m_logdev_targp = mp->m_ddev_targp; /* Handle won't be used, drop it */ if (logdev_file) bdev_fput(logdev_file); } return 0; out_free_rtdev_targ: if (mp->m_rtdev_targp) xfs_free_buftarg(mp->m_rtdev_targp); out_free_ddev_targ: xfs_free_buftarg(mp->m_ddev_targp); out_close_rtdev: if (rtdev_file) bdev_fput(rtdev_file); out_close_logdev: if (logdev_file) bdev_fput(logdev_file); return error; } /* * Setup xfs_mount buffer target pointers based on superblock */ STATIC int xfs_setup_devices( struct xfs_mount *mp) { int error; error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize); if (error) return error; if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) { unsigned int log_sector_size = BBSIZE; if (xfs_has_sector(mp)) log_sector_size = mp->m_sb.sb_logsectsize; error = xfs_setsize_buftarg(mp->m_logdev_targp, log_sector_size); if (error) return error; } if (mp->m_rtdev_targp) { error = xfs_setsize_buftarg(mp->m_rtdev_targp, mp->m_sb.sb_sectsize); if (error) return error; } return 0; } STATIC int xfs_init_mount_workqueues( struct xfs_mount *mp) { mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s", XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 1, mp->m_super->s_id); if (!mp->m_buf_workqueue) goto out; mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s", XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 0, mp->m_super->s_id); if (!mp->m_unwritten_workqueue) goto out_destroy_buf; mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s", XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 0, mp->m_super->s_id); if (!mp->m_reclaim_workqueue) goto out_destroy_unwritten; mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s", XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM), 0, mp->m_super->s_id); if (!mp->m_blockgc_wq) goto out_destroy_reclaim; mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s", XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM), 1, mp->m_super->s_id); if (!mp->m_inodegc_wq) goto out_destroy_blockgc; mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id); if (!mp->m_sync_workqueue) goto out_destroy_inodegc; return 0; out_destroy_inodegc: destroy_workqueue(mp->m_inodegc_wq); out_destroy_blockgc: destroy_workqueue(mp->m_blockgc_wq); out_destroy_reclaim: destroy_workqueue(mp->m_reclaim_workqueue); out_destroy_unwritten: destroy_workqueue(mp->m_unwritten_workqueue); out_destroy_buf: 
destroy_workqueue(mp->m_buf_workqueue); out: return -ENOMEM; } STATIC void xfs_destroy_mount_workqueues( struct xfs_mount *mp) { destroy_workqueue(mp->m_sync_workqueue); destroy_workqueue(mp->m_blockgc_wq); destroy_workqueue(mp->m_inodegc_wq); destroy_workqueue(mp->m_reclaim_workqueue); destroy_workqueue(mp->m_unwritten_workqueue); destroy_workqueue(mp->m_buf_workqueue); } static void xfs_flush_inodes_worker( struct work_struct *work) { struct xfs_mount *mp = container_of(work, struct xfs_mount, m_flush_inodes_work); struct super_block *sb = mp->m_super; if (down_read_trylock(&sb->s_umount)) { sync_inodes_sb(sb); up_read(&sb->s_umount); } } /* * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting * for IO to complete so that we effectively throttle multiple callers to the * rate at which IO is completing. */ void xfs_flush_inodes( struct xfs_mount *mp) { /* * If flush_work() returns true then that means we waited for a flush * which was already in progress. Don't bother running another scan. */ if (flush_work(&mp->m_flush_inodes_work)) return; queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work); flush_work(&mp->m_flush_inodes_work); } /* Catch misguided souls that try to use this interface on XFS */ STATIC struct inode * xfs_fs_alloc_inode( struct super_block *sb) { BUG(); return NULL; } /* * Now that the generic code is guaranteed not to be accessing * the linux inode, we can inactivate and reclaim the inode. */ STATIC void xfs_fs_destroy_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); trace_xfs_destroy_inode(ip); ASSERT(!rwsem_is_locked(&inode->i_rwsem)); XFS_STATS_INC(ip->i_mount, vn_rele); XFS_STATS_INC(ip->i_mount, vn_remove); xfs_inode_mark_reclaimable(ip); } static void xfs_fs_dirty_inode( struct inode *inode, int flags) { struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; if (!(inode->i_sb->s_flags & SB_LAZYTIME)) return; /* * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC) * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed * in flags possibly together with I_DIRTY_SYNC. */ if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME)) return; if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp)) return; xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP); xfs_trans_commit(tp); } /* * Slab object creation initialisation for the XFS inode. * This covers only the idempotent fields in the XFS inode; * all other fields need to be initialised on allocation * from the slab. This avoids the need to repeatedly initialise * fields in the xfs inode that left in the initialise state * when freeing the inode. */ STATIC void xfs_fs_inode_init_once( void *inode) { struct xfs_inode *ip = inode; memset(ip, 0, sizeof(struct xfs_inode)); /* vfs inode */ inode_init_once(VFS_I(ip)); /* xfs inode */ atomic_set(&ip->i_pincount, 0); spin_lock_init(&ip->i_flags_lock); init_rwsem(&ip->i_lock); } /* * We do an unlocked check for XFS_IDONTCACHE here because we are already * serialised against cache hits here via the inode->i_lock and igrab() in * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be * racing with us, and it avoids needing to grab a spinlock here for every inode * we drop the final reference on. 
*/ STATIC int xfs_fs_drop_inode( struct inode *inode) { struct xfs_inode *ip = XFS_I(inode); /* * If this unlinked inode is in the middle of recovery, don't * drop the inode just yet; log recovery will take care of * that. See the comment for this inode flag. */ if (ip->i_flags & XFS_IRECOVERY) { ASSERT(xlog_recovery_needed(ip->i_mount->m_log)); return 0; } return generic_drop_inode(inode); } static void xfs_mount_free( struct xfs_mount *mp) { if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) xfs_free_buftarg(mp->m_logdev_targp); if (mp->m_rtdev_targp) xfs_free_buftarg(mp->m_rtdev_targp); if (mp->m_ddev_targp) xfs_free_buftarg(mp->m_ddev_targp); debugfs_remove(mp->m_debugfs); kfree(mp->m_rtname); kfree(mp->m_logname); kfree(mp); } STATIC int xfs_fs_sync_fs( struct super_block *sb, int wait) { struct xfs_mount *mp = XFS_M(sb); int error; trace_xfs_fs_sync_fs(mp, __return_address); /* * Doing anything during the async pass would be counterproductive. */ if (!wait) return 0; error = xfs_log_force(mp, XFS_LOG_SYNC); if (error) return error; if (laptop_mode) { /* * The disk must be active because we're syncing. * We schedule log work now (now that the disk is * active) instead of later (when it might not be). */ flush_delayed_work(&mp->m_log->l_work); } /* * If we are called with page faults frozen out, it means we are about * to freeze the transaction subsystem. Take the opportunity to shut * down inodegc because once SB_FREEZE_FS is set it's too late to * prevent inactivation races with freeze. The fs doesn't get called * again by the freezing process until after SB_FREEZE_FS has been set, * so it's now or never. Same logic applies to speculative allocation * garbage collection. * * We don't care if this is a normal syncfs call that does this or * freeze that does this - we can run this multiple times without issue * and we won't race with a restart because a restart can only occur * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE. */ if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) { xfs_inodegc_stop(mp); xfs_blockgc_stop(mp); } return 0; } STATIC int xfs_fs_statfs( struct dentry *dentry, struct kstatfs *statp) { struct xfs_mount *mp = XFS_M(dentry->d_sb); xfs_sb_t *sbp = &mp->m_sb; struct xfs_inode *ip = XFS_I(d_inode(dentry)); uint64_t fakeinos, id; uint64_t icount; uint64_t ifree; uint64_t fdblocks; xfs_extlen_t lsize; int64_t ffree; /* * Expedite background inodegc but don't wait. We do not want to block * here waiting hours for a billion extent file to be truncated. */ xfs_inodegc_push(mp); statp->f_type = XFS_SUPER_MAGIC; statp->f_namelen = MAXNAMELEN - 1; id = huge_encode_dev(mp->m_ddev_targp->bt_dev); statp->f_fsid = u64_to_fsid(id); icount = percpu_counter_sum(&mp->m_icount); ifree = percpu_counter_sum(&mp->m_ifree); fdblocks = percpu_counter_sum(&mp->m_fdblocks); spin_lock(&mp->m_sb_lock); statp->f_bsize = sbp->sb_blocksize; lsize = sbp->sb_logstart ? 
sbp->sb_logblocks : 0; statp->f_blocks = sbp->sb_dblocks - lsize; spin_unlock(&mp->m_sb_lock); /* make sure statp->f_bfree does not underflow */ statp->f_bfree = max_t(int64_t, 0, fdblocks - xfs_fdblocks_unavailable(mp)); statp->f_bavail = statp->f_bfree; fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree); statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER); if (M_IGEO(mp)->maxicount) statp->f_files = min_t(typeof(statp->f_files), statp->f_files, M_IGEO(mp)->maxicount); /* If sb_icount overshot maxicount, report actual allocation */ statp->f_files = max_t(typeof(statp->f_files), statp->f_files, sbp->sb_icount); /* make sure statp->f_ffree does not underflow */ ffree = statp->f_files - (icount - ifree); statp->f_ffree = max_t(int64_t, ffree, 0); if (XFS_IS_REALTIME_MOUNT(mp) && (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) { s64 freertx; statp->f_blocks = sbp->sb_rblocks; freertx = percpu_counter_sum_positive(&mp->m_frextents); statp->f_bavail = statp->f_bfree = xfs_rtbxlen_to_blen(mp, freertx); } if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) && ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) xfs_qm_statvfs(ip, statp); return 0; } STATIC void xfs_save_resvblks(struct xfs_mount *mp) { mp->m_resblks_save = mp->m_resblks; xfs_reserve_blocks(mp, 0); } STATIC void xfs_restore_resvblks(struct xfs_mount *mp) { uint64_t resblks; if (mp->m_resblks_save) { resblks = mp->m_resblks_save; mp->m_resblks_save = 0; } else resblks = xfs_default_resblks(mp); xfs_reserve_blocks(mp, resblks); } /* * Second stage of a freeze. The data is already frozen so we only * need to take care of the metadata. Once that's done sync the superblock * to the log to dirty it in case of a crash while frozen. This ensures that we * will recover the unlinked inode lists on the next mount. */ STATIC int xfs_fs_freeze( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); unsigned int flags; int ret; /* * The filesystem is now frozen far enough that memory reclaim * cannot safely operate on the filesystem. Hence we need to * set a GFP_NOFS context here to avoid recursion deadlocks. */ flags = memalloc_nofs_save(); xfs_save_resvblks(mp); ret = xfs_log_quiesce(mp); memalloc_nofs_restore(flags); /* * For read-write filesystems, we need to restart the inodegc on error * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not * going to be run to restart it now. We are at SB_FREEZE_FS level * here, so we can restart safely without racing with a stop in * xfs_fs_sync_fs(). */ if (ret && !xfs_is_readonly(mp)) { xfs_blockgc_start(mp); xfs_inodegc_start(mp); } return ret; } STATIC int xfs_fs_unfreeze( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); xfs_restore_resvblks(mp); xfs_log_work_queue(mp); /* * Don't reactivate the inodegc worker on a readonly filesystem because * inodes are sent directly to reclaim. Don't reactivate the blockgc * worker because there are no speculative preallocations on a readonly * filesystem. */ if (!xfs_is_readonly(mp)) { xfs_blockgc_start(mp); xfs_inodegc_start(mp); } return 0; } /* * This function fills in xfs_mount_t fields based on mount args. * Note: the superblock _has_ now been read in. 
*/ STATIC int xfs_finish_flags( struct xfs_mount *mp) { /* Fail a mount where the logbuf is smaller than the log stripe */ if (xfs_has_logv2(mp)) { if (mp->m_logbsize <= 0 && mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) { mp->m_logbsize = mp->m_sb.sb_logsunit; } else if (mp->m_logbsize > 0 && mp->m_logbsize < mp->m_sb.sb_logsunit) { xfs_warn(mp, "logbuf size must be greater than or equal to log stripe size"); return -EINVAL; } } else { /* Fail a mount if the logbuf is larger than 32K */ if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) { xfs_warn(mp, "logbuf size for version 1 logs must be 16K or 32K"); return -EINVAL; } } /* * V5 filesystems always use attr2 format for attributes. */ if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) { xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. " "attr2 is always enabled for V5 filesystems."); return -EINVAL; } /* * prohibit r/w mounts of read-only filesystems */ if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) { xfs_warn(mp, "cannot mount a read-only filesystem as read-write"); return -EROFS; } if ((mp->m_qflags & XFS_GQUOTA_ACCT) && (mp->m_qflags & XFS_PQUOTA_ACCT) && !xfs_has_pquotino(mp)) { xfs_warn(mp, "Super block does not support project and group quota together"); return -EINVAL; } return 0; } static int xfs_init_percpu_counters( struct xfs_mount *mp) { int error; error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL); if (error) return -ENOMEM; error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL); if (error) goto free_icount; error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL); if (error) goto free_ifree; error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL); if (error) goto free_fdblocks; error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL); if (error) goto free_delalloc; error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL); if (error) goto free_delalloc_rt; return 0; free_delalloc_rt: percpu_counter_destroy(&mp->m_delalloc_rtextents); free_delalloc: percpu_counter_destroy(&mp->m_delalloc_blks); free_fdblocks: percpu_counter_destroy(&mp->m_fdblocks); free_ifree: percpu_counter_destroy(&mp->m_ifree); free_icount: percpu_counter_destroy(&mp->m_icount); return -ENOMEM; } void xfs_reinit_percpu_counters( struct xfs_mount *mp) { percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount); percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree); percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks); percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents); } static void xfs_destroy_percpu_counters( struct xfs_mount *mp) { percpu_counter_destroy(&mp->m_icount); percpu_counter_destroy(&mp->m_ifree); percpu_counter_destroy(&mp->m_fdblocks); ASSERT(xfs_is_shutdown(mp) || percpu_counter_sum(&mp->m_delalloc_rtextents) == 0); percpu_counter_destroy(&mp->m_delalloc_rtextents); ASSERT(xfs_is_shutdown(mp) || percpu_counter_sum(&mp->m_delalloc_blks) == 0); percpu_counter_destroy(&mp->m_delalloc_blks); percpu_counter_destroy(&mp->m_frextents); } static int xfs_inodegc_init_percpu( struct xfs_mount *mp) { struct xfs_inodegc *gc; int cpu; mp->m_inodegc = alloc_percpu(struct xfs_inodegc); if (!mp->m_inodegc) return -ENOMEM; for_each_possible_cpu(cpu) { gc = per_cpu_ptr(mp->m_inodegc, cpu); gc->cpu = cpu; gc->mp = mp; init_llist_head(&gc->list); gc->items = 0; gc->error = 0; INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker); } return 0; } static void xfs_inodegc_free_percpu( struct xfs_mount *mp) { if (!mp->m_inodegc) return; free_percpu(mp->m_inodegc); } static void xfs_fs_put_super( struct 
super_block *sb) { struct xfs_mount *mp = XFS_M(sb); xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid); xfs_filestream_unmount(mp); xfs_unmountfs(mp); xfs_rtmount_freesb(mp); xfs_freesb(mp); xchk_mount_stats_free(mp); free_percpu(mp->m_stats.xs_stats); xfs_inodegc_free_percpu(mp); xfs_destroy_percpu_counters(mp); xfs_destroy_mount_workqueues(mp); xfs_shutdown_devices(mp); } static long xfs_fs_nr_cached_objects( struct super_block *sb, struct shrink_control *sc) { /* Paranoia: catch incorrect calls during mount setup or teardown */ if (WARN_ON_ONCE(!sb->s_fs_info)) return 0; return xfs_reclaim_inodes_count(XFS_M(sb)); } static long xfs_fs_free_cached_objects( struct super_block *sb, struct shrink_control *sc) { return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan); } static void xfs_fs_shutdown( struct super_block *sb) { xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED); } static const struct super_operations xfs_super_operations = { .alloc_inode = xfs_fs_alloc_inode, .destroy_inode = xfs_fs_destroy_inode, .dirty_inode = xfs_fs_dirty_inode, .drop_inode = xfs_fs_drop_inode, .put_super = xfs_fs_put_super, .sync_fs = xfs_fs_sync_fs, .freeze_fs = xfs_fs_freeze, .unfreeze_fs = xfs_fs_unfreeze, .statfs = xfs_fs_statfs, .show_options = xfs_fs_show_options, .nr_cached_objects = xfs_fs_nr_cached_objects, .free_cached_objects = xfs_fs_free_cached_objects, .shutdown = xfs_fs_shutdown, }; static int suffix_kstrtoint( const char *s, unsigned int base, int *res) { int last, shift_left_factor = 0, _res; char *value; int ret = 0; value = kstrdup(s, GFP_KERNEL); if (!value) return -ENOMEM; last = strlen(value) - 1; if (value[last] == 'K' || value[last] == 'k') { shift_left_factor = 10; value[last] = '\0'; } if (value[last] == 'M' || value[last] == 'm') { shift_left_factor = 20; value[last] = '\0'; } if (value[last] == 'G' || value[last] == 'g') { shift_left_factor = 30; value[last] = '\0'; } if (kstrtoint(value, base, &_res)) ret = -EINVAL; kfree(value); *res = _res << shift_left_factor; return ret; } static inline void xfs_fs_warn_deprecated( struct fs_context *fc, struct fs_parameter *param, uint64_t flag, bool value) { /* Don't print the warning if reconfiguring and current mount point * already had the flag set */ if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) return; xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); } /* * Set mount state from a mount option. * * NOTE: mp->m_super is NULL here! 
*/ static int xfs_fs_parse_param( struct fs_context *fc, struct fs_parameter *param) { struct xfs_mount *parsing_mp = fc->s_fs_info; struct fs_parse_result result; int size = 0; int opt; BUILD_BUG_ON(XFS_QFLAGS_MNTOPTS & XFS_MOUNT_QUOTA_ALL); opt = fs_parse(fc, xfs_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_logbufs: parsing_mp->m_logbufs = result.uint_32; return 0; case Opt_logbsize: if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize)) return -EINVAL; return 0; case Opt_logdev: kfree(parsing_mp->m_logname); parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL); if (!parsing_mp->m_logname) return -ENOMEM; return 0; case Opt_rtdev: kfree(parsing_mp->m_rtname); parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL); if (!parsing_mp->m_rtname) return -ENOMEM; return 0; case Opt_allocsize: if (suffix_kstrtoint(param->string, 10, &size)) return -EINVAL; parsing_mp->m_allocsize_log = ffs(size) - 1; parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE; return 0; case Opt_grpid: case Opt_bsdgroups: parsing_mp->m_features |= XFS_FEAT_GRPID; return 0; case Opt_nogrpid: case Opt_sysvgroups: parsing_mp->m_features &= ~XFS_FEAT_GRPID; return 0; case Opt_wsync: parsing_mp->m_features |= XFS_FEAT_WSYNC; return 0; case Opt_norecovery: parsing_mp->m_features |= XFS_FEAT_NORECOVERY; return 0; case Opt_noalign: parsing_mp->m_features |= XFS_FEAT_NOALIGN; return 0; case Opt_swalloc: parsing_mp->m_features |= XFS_FEAT_SWALLOC; return 0; case Opt_sunit: parsing_mp->m_dalign = result.uint_32; return 0; case Opt_swidth: parsing_mp->m_swidth = result.uint_32; return 0; case Opt_inode32: parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS; return 0; case Opt_inode64: parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS; return 0; case Opt_nouuid: parsing_mp->m_features |= XFS_FEAT_NOUUID; return 0; case Opt_largeio: parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE; return 0; case Opt_nolargeio: parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE; return 0; case Opt_filestreams: parsing_mp->m_features |= XFS_FEAT_FILESTREAMS; return 0; case Opt_noquota: parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT; parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD; parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; return 0; case Opt_quota: case Opt_uquota: case Opt_usrquota: parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD); parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; return 0; case Opt_qnoenforce: case Opt_uqnoenforce: parsing_mp->m_qflags |= XFS_UQUOTA_ACCT; parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD; parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; return 0; case Opt_pquota: case Opt_prjquota: parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD); parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; return 0; case Opt_pqnoenforce: parsing_mp->m_qflags |= XFS_PQUOTA_ACCT; parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD; parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; return 0; case Opt_gquota: case Opt_grpquota: parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD); parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; return 0; case Opt_gqnoenforce: parsing_mp->m_qflags |= XFS_GQUOTA_ACCT; parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD; parsing_mp->m_qflags |= XFS_QFLAGS_MNTOPTS; return 0; case Opt_discard: parsing_mp->m_features |= XFS_FEAT_DISCARD; return 0; case Opt_nodiscard: parsing_mp->m_features &= ~XFS_FEAT_DISCARD; return 0; #ifdef CONFIG_FS_DAX case Opt_dax: xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS); return 0; case Opt_dax_enum: xfs_mount_set_dax_mode(parsing_mp, result.uint_32); return 0; #endif /* Following 
mount options will be removed in September 2025 */ case Opt_ikeep: xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true); parsing_mp->m_features |= XFS_FEAT_IKEEP; return 0; case Opt_noikeep: xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false); parsing_mp->m_features &= ~XFS_FEAT_IKEEP; return 0; case Opt_attr2: xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true); parsing_mp->m_features |= XFS_FEAT_ATTR2; return 0; case Opt_noattr2: xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true); parsing_mp->m_features |= XFS_FEAT_NOATTR2; return 0; default: xfs_warn(parsing_mp, "unknown mount option [%s].", param->key); return -EINVAL; } return 0; } static int xfs_fs_validate_params( struct xfs_mount *mp) { /* No recovery flag requires a read-only mount */ if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) { xfs_warn(mp, "no-recovery mounts must be read-only."); return -EINVAL; } /* * We have not read the superblock at this point, so only the attr2 * mount option can set the attr2 feature by this stage. */ if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) { xfs_warn(mp, "attr2 and noattr2 cannot both be specified."); return -EINVAL; } if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) { xfs_warn(mp, "sunit and swidth options incompatible with the noalign option"); return -EINVAL; } if (!IS_ENABLED(CONFIG_XFS_QUOTA) && (mp->m_qflags & ~XFS_QFLAGS_MNTOPTS)) { xfs_warn(mp, "quota support not available in this kernel."); return -EINVAL; } if ((mp->m_dalign && !mp->m_swidth) || (!mp->m_dalign && mp->m_swidth)) { xfs_warn(mp, "sunit and swidth must be specified together"); return -EINVAL; } if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) { xfs_warn(mp, "stripe width (%d) must be a multiple of the stripe unit (%d)", mp->m_swidth, mp->m_dalign); return -EINVAL; } if (mp->m_logbufs != -1 && mp->m_logbufs != 0 && (mp->m_logbufs < XLOG_MIN_ICLOGS || mp->m_logbufs > XLOG_MAX_ICLOGS)) { xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]", mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS); return -EINVAL; } if (mp->m_logbsize != -1 && mp->m_logbsize != 0 && (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE || mp->m_logbsize > XLOG_MAX_RECORD_BSIZE || !is_power_of_2(mp->m_logbsize))) { xfs_warn(mp, "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]", mp->m_logbsize); return -EINVAL; } if (xfs_has_allocsize(mp) && (mp->m_allocsize_log > XFS_MAX_IO_LOG || mp->m_allocsize_log < XFS_MIN_IO_LOG)) { xfs_warn(mp, "invalid log iosize: %d [not %d-%d]", mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG); return -EINVAL; } return 0; } struct dentry * xfs_debugfs_mkdir( const char *name, struct dentry *parent) { struct dentry *child; /* Apparently we're expected to ignore error returns?? */ child = debugfs_create_dir(name, parent); if (IS_ERR(child)) return NULL; return child; } static int xfs_fs_fill_super( struct super_block *sb, struct fs_context *fc) { struct xfs_mount *mp = sb->s_fs_info; struct inode *root; int flags = 0, error; mp->m_super = sb; /* * Copy VFS mount flags from the context now that all parameter parsing * is guaranteed to have been completed by either the old mount API or * the newer fsopen/fsconfig API. 
*/ if (fc->sb_flags & SB_RDONLY) xfs_set_readonly(mp); if (fc->sb_flags & SB_DIRSYNC) mp->m_features |= XFS_FEAT_DIRSYNC; if (fc->sb_flags & SB_SYNCHRONOUS) mp->m_features |= XFS_FEAT_WSYNC; error = xfs_fs_validate_params(mp); if (error) return error; sb_min_blocksize(sb, BBSIZE); sb->s_xattr = xfs_xattr_handlers; sb->s_export_op = &xfs_export_operations; #ifdef CONFIG_XFS_QUOTA sb->s_qcop = &xfs_quotactl_operations; sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; #endif sb->s_op = &xfs_super_operations; /* * Delay mount work if the debug hook is set. This is debug * instrumention to coordinate simulation of xfs mount failures with * VFS superblock operations */ if (xfs_globals.mount_delay) { xfs_notice(mp, "Delaying mount for %d seconds.", xfs_globals.mount_delay); msleep(xfs_globals.mount_delay * 1000); } if (fc->sb_flags & SB_SILENT) flags |= XFS_MFSI_QUIET; error = xfs_open_devices(mp); if (error) return error; if (xfs_debugfs) { mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id, xfs_debugfs); } else { mp->m_debugfs = NULL; } error = xfs_init_mount_workqueues(mp); if (error) goto out_shutdown_devices; error = xfs_init_percpu_counters(mp); if (error) goto out_destroy_workqueues; error = xfs_inodegc_init_percpu(mp); if (error) goto out_destroy_counters; /* Allocate stats memory before we do operations that might use it */ mp->m_stats.xs_stats = alloc_percpu(struct xfsstats); if (!mp->m_stats.xs_stats) { error = -ENOMEM; goto out_destroy_inodegc; } error = xchk_mount_stats_alloc(mp); if (error) goto out_free_stats; error = xfs_readsb(mp, flags); if (error) goto out_free_scrub_stats; error = xfs_finish_flags(mp); if (error) goto out_free_sb; error = xfs_setup_devices(mp); if (error) goto out_free_sb; /* * V4 support is undergoing deprecation. * * Note: this has to use an open coded m_features check as xfs_has_crc * always returns false for !CONFIG_XFS_SUPPORT_V4. */ if (!(mp->m_features & XFS_FEAT_CRC)) { if (!IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) { xfs_warn(mp, "Deprecated V4 format (crc=0) not supported by kernel."); error = -EINVAL; goto out_free_sb; } xfs_warn_once(mp, "Deprecated V4 format (crc=0) will not be supported after September 2030."); } /* ASCII case insensitivity is undergoing deprecation. */ if (xfs_has_asciici(mp)) { #ifdef CONFIG_XFS_SUPPORT_ASCII_CI xfs_warn_once(mp, "Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030."); #else xfs_warn(mp, "Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel."); error = -EINVAL; goto out_free_sb; #endif } /* Filesystem claims it needs repair, so refuse the mount. */ if (xfs_has_needsrepair(mp)) { xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair."); error = -EFSCORRUPTED; goto out_free_sb; } /* * Don't touch the filesystem if a user tool thinks it owns the primary * superblock. mkfs doesn't clear the flag from secondary supers, so * we don't check them at all. */ if (mp->m_sb.sb_inprogress) { xfs_warn(mp, "Offline file system operation in progress!"); error = -EFSCORRUPTED; goto out_free_sb; } if (mp->m_sb.sb_blocksize > PAGE_SIZE) { size_t max_folio_size = mapping_max_folio_size_supported(); if (!xfs_has_crc(mp)) { xfs_warn(mp, "V4 Filesystem with blocksize %d bytes. 
Only pagesize (%ld) or less is supported.", mp->m_sb.sb_blocksize, PAGE_SIZE); error = -ENOSYS; goto out_free_sb; } if (mp->m_sb.sb_blocksize > max_folio_size) { xfs_warn(mp, "block size (%u bytes) not supported; Only block size (%zu) or less is supported", mp->m_sb.sb_blocksize, max_folio_size); error = -ENOSYS; goto out_free_sb; } xfs_warn_experimental(mp, XFS_EXPERIMENTAL_LBS); } /* Ensure this filesystem fits in the page cache limits */ if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) || xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) { xfs_warn(mp, "file system too large to be mounted on this system."); error = -EFBIG; goto out_free_sb; } /* * XFS block mappings use 54 bits to store the logical block offset. * This should suffice to handle the maximum file size that the VFS * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON * to check this assertion. * * Avoid integer overflow by comparing the maximum bmbt offset to the * maximum pagecache offset in units of fs blocks. */ if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) { xfs_warn(mp, "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!", XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE), XFS_MAX_FILEOFF); error = -EINVAL; goto out_free_sb; } error = xfs_rtmount_readsb(mp); if (error) goto out_free_sb; error = xfs_filestream_mount(mp); if (error) goto out_free_rtsb; /* * we must configure the block size in the superblock before we run the * full mount process as the mount process can lookup and cache inodes. */ sb->s_magic = XFS_SUPER_MAGIC; sb->s_blocksize = mp->m_sb.sb_blocksize; sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_max_links = XFS_MAXLINK; sb->s_time_gran = 1; if (xfs_has_bigtime(mp)) { sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN); sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX); } else { sb->s_time_min = XFS_LEGACY_TIME_MIN; sb->s_time_max = XFS_LEGACY_TIME_MAX; } trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max); sb->s_iflags |= SB_I_CGROUPWB; set_posix_acl_flag(sb); /* version 5 superblocks support inode version counters. */ if (xfs_has_crc(mp)) sb->s_flags |= SB_I_VERSION; if (xfs_has_dax_always(mp)) { error = xfs_setup_dax_always(mp); if (error) goto out_filestream_unmount; } if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) { xfs_warn(mp, "mounting with \"discard\" option, but the device does not support discard"); mp->m_features &= ~XFS_FEAT_DISCARD; } if (xfs_has_metadir(mp)) xfs_warn_experimental(mp, XFS_EXPERIMENTAL_METADIR); if (xfs_has_reflink(mp)) { if (mp->m_sb.sb_rblocks) { xfs_alert(mp, "reflink not compatible with realtime device!"); error = -EINVAL; goto out_filestream_unmount; } if (xfs_globals.always_cow) { xfs_info(mp, "using DEBUG-only always_cow mode."); mp->m_always_cow = true; } } if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) { xfs_alert(mp, "reverse mapping btree not compatible with realtime device!"); error = -EINVAL; goto out_filestream_unmount; } if (xfs_has_exchange_range(mp)) xfs_warn_experimental(mp, XFS_EXPERIMENTAL_EXCHRANGE); if (xfs_has_parent(mp)) xfs_warn_experimental(mp, XFS_EXPERIMENTAL_PPTR); /* * If no quota mount options were provided, maybe we'll try to pick * up the quota accounting and enforcement flags from the ondisk sb. 
*/ if (!(mp->m_qflags & XFS_QFLAGS_MNTOPTS)) xfs_set_resuming_quotaon(mp); mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS; error = xfs_mountfs(mp); if (error) goto out_filestream_unmount; root = igrab(VFS_I(mp->m_rootip)); if (!root) { error = -ENOENT; goto out_unmount; } sb->s_root = d_make_root(root); if (!sb->s_root) { error = -ENOMEM; goto out_unmount; } return 0; out_filestream_unmount: xfs_filestream_unmount(mp); out_free_rtsb: xfs_rtmount_freesb(mp); out_free_sb: xfs_freesb(mp); out_free_scrub_stats: xchk_mount_stats_free(mp); out_free_stats: free_percpu(mp->m_stats.xs_stats); out_destroy_inodegc: xfs_inodegc_free_percpu(mp); out_destroy_counters: xfs_destroy_percpu_counters(mp); out_destroy_workqueues: xfs_destroy_mount_workqueues(mp); out_shutdown_devices: xfs_shutdown_devices(mp); return error; out_unmount: xfs_filestream_unmount(mp); xfs_unmountfs(mp); goto out_free_rtsb; } static int xfs_fs_get_tree( struct fs_context *fc) { return get_tree_bdev(fc, xfs_fs_fill_super); } static int xfs_remount_rw( struct xfs_mount *mp) { struct xfs_sb *sbp = &mp->m_sb; int error; if (xfs_has_norecovery(mp)) { xfs_warn(mp, "ro->rw transition prohibited on norecovery mount"); return -EINVAL; } if (xfs_sb_is_v5(sbp) && xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) { xfs_warn(mp, "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem", (sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_UNKNOWN)); return -EINVAL; } xfs_clear_readonly(mp); /* * If this is the first remount to writeable state we might have some * superblock changes to update. */ if (mp->m_update_sb) { error = xfs_sync_sb(mp, false); if (error) { xfs_warn(mp, "failed to write sb changes"); return error; } mp->m_update_sb = false; } /* * Fill out the reserve pool if it is empty. Use the stashed value if * it is non-zero, otherwise go with the default. */ xfs_restore_resvblks(mp); xfs_log_work_queue(mp); xfs_blockgc_start(mp); /* Create the per-AG metadata reservation pool .*/ error = xfs_fs_reserve_ag_blocks(mp); if (error && error != -ENOSPC) return error; /* Re-enable the background inode inactivation worker. */ xfs_inodegc_start(mp); return 0; } static int xfs_remount_ro( struct xfs_mount *mp) { struct xfs_icwalk icw = { .icw_flags = XFS_ICWALK_FLAG_SYNC, }; int error; /* Flush all the dirty data to disk. */ error = sync_filesystem(mp->m_super); if (error) return error; /* * Cancel background eofb scanning so it cannot race with the final * log force+buftarg wait and deadlock the remount. */ xfs_blockgc_stop(mp); /* * Clear out all remaining COW staging extents and speculative post-EOF * preallocations so that we don't leave inodes requiring inactivation * cleanups during reclaim on a read-only mount. We must process every * cached inode, so this requires a synchronous cache scan. */ error = xfs_blockgc_free_space(mp, &icw); if (error) { xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); return error; } /* * Stop the inodegc background worker. xfs_fs_reconfigure already * flushed all pending inodegc work when it sync'd the filesystem. * The VFS holds s_umount, so we know that inodes cannot enter * xfs_fs_destroy_inode during a remount operation. In readonly mode * we send inodes straight to reclaim, so no inodes will be queued. */ xfs_inodegc_stop(mp); /* Free the per-AG metadata reservation pool. */ xfs_fs_unreserve_ag_blocks(mp); /* * Before we sync the metadata, we need to free up the reserve block * pool so that the used block count in the superblock on disk is * correct at the end of the remount. 
Stash the current* reserve pool * size so that if we get remounted rw, we can return it to the same * size. */ xfs_save_resvblks(mp); xfs_log_clean(mp); xfs_set_readonly(mp); return 0; } /* * Logically we would return an error here to prevent users from believing * they might have changed mount options using remount which can't be changed. * * But unfortunately mount(8) adds all options from mtab and fstab to the mount * arguments in some cases so we can't blindly reject options, but have to * check for each specified option if it actually differs from the currently * set option and only reject it if that's the case. * * Until that is implemented we return success for every remount request, and * silently ignore all options that we can't actually change. */ static int xfs_fs_reconfigure( struct fs_context *fc) { struct xfs_mount *mp = XFS_M(fc->root->d_sb); struct xfs_mount *new_mp = fc->s_fs_info; int flags = fc->sb_flags; int error; new_mp->m_qflags &= ~XFS_QFLAGS_MNTOPTS; /* version 5 superblocks always support version counters. */ if (xfs_has_crc(mp)) fc->sb_flags |= SB_I_VERSION; error = xfs_fs_validate_params(new_mp); if (error) return error; /* inode32 -> inode64 */ if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) { mp->m_features &= ~XFS_FEAT_SMALL_INUMS; mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount); } /* inode64 -> inode32 */ if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) { mp->m_features |= XFS_FEAT_SMALL_INUMS; mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount); } /* ro -> rw */ if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) { error = xfs_remount_rw(mp); if (error) return error; } /* rw -> ro */ if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) { error = xfs_remount_ro(mp); if (error) return error; } return 0; } static void xfs_fs_free( struct fs_context *fc) { struct xfs_mount *mp = fc->s_fs_info; /* * mp is stored in the fs_context when it is initialized. * mp is transferred to the superblock on a successful mount, * but if an error occurs before the transfer we have to free * it here. */ if (mp) xfs_mount_free(mp); } static const struct fs_context_operations xfs_context_ops = { .parse_param = xfs_fs_parse_param, .get_tree = xfs_fs_get_tree, .reconfigure = xfs_fs_reconfigure, .free = xfs_fs_free, }; /* * WARNING: do not initialise any parameters in this function that depend on * mount option parsing having already been performed as this can be called from * fsopen() before any parameters have been set. */ static int xfs_init_fs_context( struct fs_context *fc) { struct xfs_mount *mp; int i; mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL); if (!mp) return -ENOMEM; spin_lock_init(&mp->m_sb_lock); for (i = 0; i < XG_TYPE_MAX; i++) xa_init(&mp->m_groups[i].xa); mutex_init(&mp->m_growlock); INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker); INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); mp->m_kobj.kobject.kset = xfs_kset; /* * We don't create the finobt per-ag space reservation until after log * recovery, so we must set this to true so that an ifree transaction * started during log recovery will not depend on space reservations * for finobt expansion. */ mp->m_finobt_nores = true; /* * These can be overridden by the mount option parsing. 
*/ mp->m_logbufs = -1; mp->m_logbsize = -1; mp->m_allocsize_log = 16; /* 64k */ xfs_hooks_init(&mp->m_dir_update_hooks); fc->s_fs_info = mp; fc->ops = &xfs_context_ops; return 0; } static void xfs_kill_sb( struct super_block *sb) { kill_block_super(sb); xfs_mount_free(XFS_M(sb)); } static struct file_system_type xfs_fs_type = { .owner = THIS_MODULE, .name = "xfs", .init_fs_context = xfs_init_fs_context, .parameters = xfs_fs_parameters, .kill_sb = xfs_kill_sb, .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP | FS_MGTIME, }; MODULE_ALIAS_FS("xfs"); STATIC int __init xfs_init_caches(void) { int error; xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0, SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT, NULL); if (!xfs_buf_cache) goto out; xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket", sizeof(struct xlog_ticket), 0, 0, NULL); if (!xfs_log_ticket_cache) goto out_destroy_buf_cache; error = xfs_btree_init_cur_caches(); if (error) goto out_destroy_log_ticket_cache; error = rcbagbt_init_cur_cache(); if (error) goto out_destroy_btree_cur_cache; error = xfs_defer_init_item_caches(); if (error) goto out_destroy_rcbagbt_cur_cache; xfs_da_state_cache = kmem_cache_create("xfs_da_state", sizeof(struct xfs_da_state), 0, 0, NULL); if (!xfs_da_state_cache) goto out_destroy_defer_item_cache; xfs_ifork_cache = kmem_cache_create("xfs_ifork", sizeof(struct xfs_ifork), 0, 0, NULL); if (!xfs_ifork_cache) goto out_destroy_da_state_cache; xfs_trans_cache = kmem_cache_create("xfs_trans", sizeof(struct xfs_trans), 0, 0, NULL); if (!xfs_trans_cache) goto out_destroy_ifork_cache; /* * The size of the cache-allocated buf log item is the maximum * size possible under XFS. This wastes a little bit of memory, * but it is much faster. */ xfs_buf_item_cache = kmem_cache_create("xfs_buf_item", sizeof(struct xfs_buf_log_item), 0, 0, NULL); if (!xfs_buf_item_cache) goto out_destroy_trans_cache; xfs_efd_cache = kmem_cache_create("xfs_efd_item", xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS), 0, 0, NULL); if (!xfs_efd_cache) goto out_destroy_buf_item_cache; xfs_efi_cache = kmem_cache_create("xfs_efi_item", xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS), 0, 0, NULL); if (!xfs_efi_cache) goto out_destroy_efd_cache; xfs_inode_cache = kmem_cache_create("xfs_inode", sizeof(struct xfs_inode), 0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT), xfs_fs_inode_init_once); if (!xfs_inode_cache) goto out_destroy_efi_cache; xfs_ili_cache = kmem_cache_create("xfs_ili", sizeof(struct xfs_inode_log_item), 0, SLAB_RECLAIM_ACCOUNT, NULL); if (!xfs_ili_cache) goto out_destroy_inode_cache; xfs_icreate_cache = kmem_cache_create("xfs_icr", sizeof(struct xfs_icreate_item), 0, 0, NULL); if (!xfs_icreate_cache) goto out_destroy_ili_cache; xfs_rud_cache = kmem_cache_create("xfs_rud_item", sizeof(struct xfs_rud_log_item), 0, 0, NULL); if (!xfs_rud_cache) goto out_destroy_icreate_cache; xfs_rui_cache = kmem_cache_create("xfs_rui_item", xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS), 0, 0, NULL); if (!xfs_rui_cache) goto out_destroy_rud_cache; xfs_cud_cache = kmem_cache_create("xfs_cud_item", sizeof(struct xfs_cud_log_item), 0, 0, NULL); if (!xfs_cud_cache) goto out_destroy_rui_cache; xfs_cui_cache = kmem_cache_create("xfs_cui_item", xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS), 0, 0, NULL); if (!xfs_cui_cache) goto out_destroy_cud_cache; xfs_bud_cache = kmem_cache_create("xfs_bud_item", sizeof(struct xfs_bud_log_item), 0, 0, NULL); if (!xfs_bud_cache) goto out_destroy_cui_cache; xfs_bui_cache = 
kmem_cache_create("xfs_bui_item", xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS), 0, 0, NULL); if (!xfs_bui_cache) goto out_destroy_bud_cache; xfs_attrd_cache = kmem_cache_create("xfs_attrd_item", sizeof(struct xfs_attrd_log_item), 0, 0, NULL); if (!xfs_attrd_cache) goto out_destroy_bui_cache; xfs_attri_cache = kmem_cache_create("xfs_attri_item", sizeof(struct xfs_attri_log_item), 0, 0, NULL); if (!xfs_attri_cache) goto out_destroy_attrd_cache; xfs_iunlink_cache = kmem_cache_create("xfs_iul_item", sizeof(struct xfs_iunlink_item), 0, 0, NULL); if (!xfs_iunlink_cache) goto out_destroy_attri_cache; xfs_xmd_cache = kmem_cache_create("xfs_xmd_item", sizeof(struct xfs_xmd_log_item), 0, 0, NULL); if (!xfs_xmd_cache) goto out_destroy_iul_cache; xfs_xmi_cache = kmem_cache_create("xfs_xmi_item", sizeof(struct xfs_xmi_log_item), 0, 0, NULL); if (!xfs_xmi_cache) goto out_destroy_xmd_cache; xfs_parent_args_cache = kmem_cache_create("xfs_parent_args", sizeof(struct xfs_parent_args), 0, 0, NULL); if (!xfs_parent_args_cache) goto out_destroy_xmi_cache; return 0; out_destroy_xmi_cache: kmem_cache_destroy(xfs_xmi_cache); out_destroy_xmd_cache: kmem_cache_destroy(xfs_xmd_cache); out_destroy_iul_cache: kmem_cache_destroy(xfs_iunlink_cache); out_destroy_attri_cache: kmem_cache_destroy(xfs_attri_cache); out_destroy_attrd_cache: kmem_cache_destroy(xfs_attrd_cache); out_destroy_bui_cache: kmem_cache_destroy(xfs_bui_cache); out_destroy_bud_cache: kmem_cache_destroy(xfs_bud_cache); out_destroy_cui_cache: kmem_cache_destroy(xfs_cui_cache); out_destroy_cud_cache: kmem_cache_destroy(xfs_cud_cache); out_destroy_rui_cache: kmem_cache_destroy(xfs_rui_cache); out_destroy_rud_cache: kmem_cache_destroy(xfs_rud_cache); out_destroy_icreate_cache: kmem_cache_destroy(xfs_icreate_cache); out_destroy_ili_cache: kmem_cache_destroy(xfs_ili_cache); out_destroy_inode_cache: kmem_cache_destroy(xfs_inode_cache); out_destroy_efi_cache: kmem_cache_destroy(xfs_efi_cache); out_destroy_efd_cache: kmem_cache_destroy(xfs_efd_cache); out_destroy_buf_item_cache: kmem_cache_destroy(xfs_buf_item_cache); out_destroy_trans_cache: kmem_cache_destroy(xfs_trans_cache); out_destroy_ifork_cache: kmem_cache_destroy(xfs_ifork_cache); out_destroy_da_state_cache: kmem_cache_destroy(xfs_da_state_cache); out_destroy_defer_item_cache: xfs_defer_destroy_item_caches(); out_destroy_rcbagbt_cur_cache: rcbagbt_destroy_cur_cache(); out_destroy_btree_cur_cache: xfs_btree_destroy_cur_caches(); out_destroy_log_ticket_cache: kmem_cache_destroy(xfs_log_ticket_cache); out_destroy_buf_cache: kmem_cache_destroy(xfs_buf_cache); out: return -ENOMEM; } STATIC void xfs_destroy_caches(void) { /* * Make sure all delayed rcu free are flushed before we * destroy caches. 
*/ rcu_barrier(); kmem_cache_destroy(xfs_parent_args_cache); kmem_cache_destroy(xfs_xmd_cache); kmem_cache_destroy(xfs_xmi_cache); kmem_cache_destroy(xfs_iunlink_cache); kmem_cache_destroy(xfs_attri_cache); kmem_cache_destroy(xfs_attrd_cache); kmem_cache_destroy(xfs_bui_cache); kmem_cache_destroy(xfs_bud_cache); kmem_cache_destroy(xfs_cui_cache); kmem_cache_destroy(xfs_cud_cache); kmem_cache_destroy(xfs_rui_cache); kmem_cache_destroy(xfs_rud_cache); kmem_cache_destroy(xfs_icreate_cache); kmem_cache_destroy(xfs_ili_cache); kmem_cache_destroy(xfs_inode_cache); kmem_cache_destroy(xfs_efi_cache); kmem_cache_destroy(xfs_efd_cache); kmem_cache_destroy(xfs_buf_item_cache); kmem_cache_destroy(xfs_trans_cache); kmem_cache_destroy(xfs_ifork_cache); kmem_cache_destroy(xfs_da_state_cache); xfs_defer_destroy_item_caches(); rcbagbt_destroy_cur_cache(); xfs_btree_destroy_cur_caches(); kmem_cache_destroy(xfs_log_ticket_cache); kmem_cache_destroy(xfs_buf_cache); } STATIC int __init xfs_init_workqueues(void) { /* * The allocation workqueue can be used in memory reclaim situations * (writepage path), and parallelism is only limited by the number of * AGs in all the filesystems mounted. Hence use the default large * max_active value for this workqueue. */ xfs_alloc_wq = alloc_workqueue("xfsalloc", XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0); if (!xfs_alloc_wq) return -ENOMEM; xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND), 0); if (!xfs_discard_wq) goto out_free_alloc_wq; return 0; out_free_alloc_wq: destroy_workqueue(xfs_alloc_wq); return -ENOMEM; } STATIC void xfs_destroy_workqueues(void) { destroy_workqueue(xfs_discard_wq); destroy_workqueue(xfs_alloc_wq); } STATIC int __init init_xfs_fs(void) { int error; xfs_check_ondisk_structs(); error = xfs_dahash_test(); if (error) return error; printk(KERN_INFO XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled\n"); xfs_dir_startup(); error = xfs_init_caches(); if (error) goto out; error = xfs_init_workqueues(); if (error) goto out_destroy_caches; error = xfs_mru_cache_init(); if (error) goto out_destroy_wq; error = xfs_init_procfs(); if (error) goto out_mru_cache_uninit; error = xfs_sysctl_register(); if (error) goto out_cleanup_procfs; xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL); xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj); if (!xfs_kset) { error = -ENOMEM; goto out_debugfs_unregister; } xfsstats.xs_kobj.kobject.kset = xfs_kset; xfsstats.xs_stats = alloc_percpu(struct xfsstats); if (!xfsstats.xs_stats) { error = -ENOMEM; goto out_kset_unregister; } error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL, "stats"); if (error) goto out_free_stats; error = xchk_global_stats_setup(xfs_debugfs); if (error) goto out_remove_stats_kobj; #ifdef DEBUG xfs_dbg_kobj.kobject.kset = xfs_kset; error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug"); if (error) goto out_remove_scrub_stats; #endif error = xfs_qm_init(); if (error) goto out_remove_dbg_kobj; error = register_filesystem(&xfs_fs_type); if (error) goto out_qm_exit; return 0; out_qm_exit: xfs_qm_exit(); out_remove_dbg_kobj: #ifdef DEBUG xfs_sysfs_del(&xfs_dbg_kobj); out_remove_scrub_stats: #endif xchk_global_stats_teardown(); out_remove_stats_kobj: xfs_sysfs_del(&xfsstats.xs_kobj); out_free_stats: free_percpu(xfsstats.xs_stats); out_kset_unregister: kset_unregister(xfs_kset); out_debugfs_unregister: debugfs_remove(xfs_debugfs); xfs_sysctl_unregister(); out_cleanup_procfs: xfs_cleanup_procfs(); out_mru_cache_uninit: xfs_mru_cache_uninit(); out_destroy_wq: 
xfs_destroy_workqueues(); out_destroy_caches: xfs_destroy_caches(); out: return error; } STATIC void __exit exit_xfs_fs(void) { xfs_qm_exit(); unregister_filesystem(&xfs_fs_type); #ifdef DEBUG xfs_sysfs_del(&xfs_dbg_kobj); #endif xchk_global_stats_teardown(); xfs_sysfs_del(&xfsstats.xs_kobj); free_percpu(xfsstats.xs_stats); kset_unregister(xfs_kset); debugfs_remove(xfs_debugfs); xfs_sysctl_unregister(); xfs_cleanup_procfs(); xfs_mru_cache_uninit(); xfs_destroy_workqueues(); xfs_destroy_caches(); xfs_uuid_table_free(); } module_init(init_xfs_fs); module_exit(exit_xfs_fs); MODULE_AUTHOR("Silicon Graphics, Inc."); MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled"); MODULE_LICENSE("GPL");
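The mount option parsing above (xfs_fs_parse_param/suffix_kstrtoint) turns human-readable sizes such as "logbsize=64k" or "allocsize=1m" into byte counts by stripping a trailing K/M/G suffix and left-shifting the remaining integer by 10/20/30. A minimal stand-alone sketch of that conversion follows; it is user-space C for illustration only, and parse_size_suffix() is a hypothetical name, not a kernel helper (error handling is simplified relative to suffix_kstrtoint()).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative re-implementation of the K/M/G suffix handling above. */
static int parse_size_suffix(const char *s, int *res)
{
	char *dup = strdup(s);
	int shift = 0;
	size_t last;
	char *end;
	long val;

	if (!dup || !dup[0]) {
		free(dup);
		return -1;
	}
	last = strlen(dup) - 1;
	switch (dup[last]) {
	case 'k': case 'K': shift = 10; dup[last] = '\0'; break;
	case 'm': case 'M': shift = 20; dup[last] = '\0'; break;
	case 'g': case 'G': shift = 30; dup[last] = '\0'; break;
	}
	val = strtol(dup, &end, 10);
	if (end == dup || *end != '\0') {
		free(dup);
		return -1;
	}
	free(dup);
	*res = (int)(val << shift);
	return 0;
}

int main(void)
{
	int bytes;

	/* "logbsize=64k" resolves to 64 << 10 == 65536 bytes */
	if (parse_size_suffix("64k", &bytes) == 0)
		printf("64k -> %d bytes\n", bytes);
	return 0;
}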
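The drivers/base/property.c code that follows implements the unified device/fwnode property API whose kerneldoc is reproduced below. As consumer-side orientation, here is a minimal sketch of a driver probe path reading properties through that API; example_probe_props() and the "vendor,…" property names are made up for illustration, and error handling merely mirrors the Return: codes documented below.

#include <linux/device.h>
#include <linux/property.h>

/* Hypothetical probe helper showing typical use of the device property API. */
static int example_probe_props(struct device *dev)
{
	const char *label = NULL;
	u32 delay_ms = 0;
	int ret;

	/* Boolean-style property: presence alone carries the information. */
	if (device_property_present(dev, "vendor,self-test"))
		dev_info(dev, "self-test requested by firmware\n");

	/* Single u32 value; treat a missing property as optional. */
	ret = device_property_read_u32(dev, "vendor,startup-delay-ms", &delay_ms);
	if (ret && ret != -EINVAL && ret != -ENODATA)
		return ret;

	/* String property; the returned pointer remains owned by the fwnode. */
	ret = device_property_read_string(dev, "label", &label);
	if (!ret)
		dev_info(dev, "labelled \"%s\", delay %u ms\n", label, delay_ms);

	return 0;
}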
// SPDX-License-Identifier: GPL-2.0 /* * property.c - Unified device property interface. * * Copyright (C) 2014, Intel Corporation * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com> * Mika Westerberg <mika.westerberg@linux.intel.com> */ #include <linux/device.h> #include <linux/err.h> #include <linux/export.h> #include <linux/kconfig.h> #include <linux/of.h> #include <linux/property.h> #include <linux/phy.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> struct fwnode_handle *__dev_fwnode(struct device *dev) { return IS_ENABLED(CONFIG_OF) && dev->of_node ? of_fwnode_handle(dev->of_node) : dev->fwnode; } EXPORT_SYMBOL_GPL(__dev_fwnode); const struct fwnode_handle *__dev_fwnode_const(const struct device *dev) { return IS_ENABLED(CONFIG_OF) && dev->of_node ?
of_fwnode_handle(dev->of_node) : dev->fwnode; } EXPORT_SYMBOL_GPL(__dev_fwnode_const); /** * device_property_present - check if a property of a device is present * @dev: Device whose property is being checked * @propname: Name of the property * * Check if property @propname is present in the device firmware description. * * Return: true if property @propname is present. Otherwise, returns false. */ bool device_property_present(const struct device *dev, const char *propname) { return fwnode_property_present(dev_fwnode(dev), propname); } EXPORT_SYMBOL_GPL(device_property_present); /** * fwnode_property_present - check if a property of a firmware node is present * @fwnode: Firmware node whose property to check * @propname: Name of the property * * Return: true if property @propname is present. Otherwise, returns false. */ bool fwnode_property_present(const struct fwnode_handle *fwnode, const char *propname) { bool ret; if (IS_ERR_OR_NULL(fwnode)) return false; ret = fwnode_call_bool_op(fwnode, property_present, propname); if (ret) return ret; return fwnode_call_bool_op(fwnode->secondary, property_present, propname); } EXPORT_SYMBOL_GPL(fwnode_property_present); /** * device_property_read_u8_array - return a u8 array property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of u8 properties with @propname from the device * firmware description and stores them to @val if found. * * It's recommended to call device_property_count_u8() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_u8_array(const struct device *dev, const char *propname, u8 *val, size_t nval) { return fwnode_property_read_u8_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_u8_array); /** * device_property_read_u16_array - return a u16 array property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of u16 properties with @propname from the device * firmware description and stores them to @val if found. * * It's recommended to call device_property_count_u16() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. 
*/ int device_property_read_u16_array(const struct device *dev, const char *propname, u16 *val, size_t nval) { return fwnode_property_read_u16_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_u16_array); /** * device_property_read_u32_array - return a u32 array property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of u32 properties with @propname from the device * firmware description and stores them to @val if found. * * It's recommended to call device_property_count_u32() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_u32_array(const struct device *dev, const char *propname, u32 *val, size_t nval) { return fwnode_property_read_u32_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_u32_array); /** * device_property_read_u64_array - return a u64 array property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of u64 properties with @propname from the device * firmware description and stores them to @val if found. * * It's recommended to call device_property_count_u64() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_u64_array(const struct device *dev, const char *propname, u64 *val, size_t nval) { return fwnode_property_read_u64_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_u64_array); /** * device_property_read_string_array - return a string array property of device * @dev: Device to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Function reads an array of string properties with @propname from the device * firmware description and stores them to @val if found. * * It's recommended to call device_property_string_array_count() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values read on success if @val is non-NULL, * number of values available on success if @val is NULL, * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property is not an array of strings, * %-EOVERFLOW if the size of the property is not as expected. * %-ENXIO if no suitable firmware interface is present. 
*/ int device_property_read_string_array(const struct device *dev, const char *propname, const char **val, size_t nval) { return fwnode_property_read_string_array(dev_fwnode(dev), propname, val, nval); } EXPORT_SYMBOL_GPL(device_property_read_string_array); /** * device_property_read_string - return a string property of a device * @dev: Device to get the property of * @propname: Name of the property * @val: The value is stored here * * Function reads property @propname from the device firmware description and * stores the value into @val if found. The value is checked to be a string. * * Return: %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property type is not a string. * %-ENXIO if no suitable firmware interface is present. */ int device_property_read_string(const struct device *dev, const char *propname, const char **val) { return fwnode_property_read_string(dev_fwnode(dev), propname, val); } EXPORT_SYMBOL_GPL(device_property_read_string); /** * device_property_match_string - find a string in an array and return index * @dev: Device to get the property of * @propname: Name of the property holding the array * @string: String to look for * * Find a given string in a string array and if it is found return the * index back. * * Return: index, starting from %0, if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, * %-ENXIO if no suitable firmware interface is present. */ int device_property_match_string(const struct device *dev, const char *propname, const char *string) { return fwnode_property_match_string(dev_fwnode(dev), propname, string); } EXPORT_SYMBOL_GPL(device_property_match_string); static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode, const char *propname, unsigned int elem_size, void *val, size_t nval) { int ret; if (IS_ERR_OR_NULL(fwnode)) return -EINVAL; ret = fwnode_call_int_op(fwnode, property_read_int_array, propname, elem_size, val, nval); if (ret != -EINVAL) return ret; return fwnode_call_int_op(fwnode->secondary, property_read_int_array, propname, elem_size, val, nval); } /** * fwnode_property_read_u8_array - return a u8 array property of firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read an array of u8 properties with @propname from @fwnode and stores them to * @val if found. * * It's recommended to call fwnode_property_count_u8() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. 
*/ int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode, const char *propname, u8 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u8), val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array); /** * fwnode_property_read_u16_array - return a u16 array property of firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read an array of u16 properties with @propname from @fwnode and store them to * @val if found. * * It's recommended to call fwnode_property_count_u16() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode, const char *propname, u16 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u16), val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array); /** * fwnode_property_read_u32_array - return a u32 array property of firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read an array of u32 properties with @propname from @fwnode store them to * @val if found. * * It's recommended to call fwnode_property_count_u32() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode, const char *propname, u32 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u32), val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array); /** * fwnode_property_read_u64_array - return a u64 array property firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read an array of u64 properties with @propname from @fwnode and store them to * @val if found. * * It's recommended to call fwnode_property_count_u64() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values if @val was %NULL, * %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of numbers, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. 
*/ int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode, const char *propname, u64 *val, size_t nval) { return fwnode_property_read_int_array(fwnode, propname, sizeof(u64), val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); /** * fwnode_property_read_string_array - return string array property of a node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The values are stored here or %NULL to return the number of values * @nval: Size of the @val array * * Read a string list property @propname from the given firmware node and store * them to @val if found. * * It's recommended to call fwnode_property_string_array_count() instead of calling * this function with @val equals %NULL and @nval equals 0. * * Return: number of values read on success if @val is non-NULL, * number of values available on success if @val is NULL, * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property is not an array of strings, * %-EOVERFLOW if the size of the property is not as expected, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_read_string_array(const struct fwnode_handle *fwnode, const char *propname, const char **val, size_t nval) { int ret; if (IS_ERR_OR_NULL(fwnode)) return -EINVAL; ret = fwnode_call_int_op(fwnode, property_read_string_array, propname, val, nval); if (ret != -EINVAL) return ret; return fwnode_call_int_op(fwnode->secondary, property_read_string_array, propname, val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); /** * fwnode_property_read_string - return a string property of a firmware node * @fwnode: Firmware node to get the property of * @propname: Name of the property * @val: The value is stored here * * Read property @propname from the given firmware node and store the value into * @val if found. The value is checked to be a string. * * Return: %0 if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property is not a string, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_read_string(const struct fwnode_handle *fwnode, const char *propname, const char **val) { int ret = fwnode_property_read_string_array(fwnode, propname, val, 1); return ret < 0 ? ret : 0; } EXPORT_SYMBOL_GPL(fwnode_property_read_string); /** * fwnode_property_match_string - find a string in an array and return index * @fwnode: Firmware node to get the property of * @propname: Name of the property holding the array * @string: String to look for * * Find a given string in a string array and if it is found return the * index back. * * Return: index, starting from %0, if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, * %-ENXIO if no suitable firmware interface is present.
*/ int fwnode_property_match_string(const struct fwnode_handle *fwnode, const char *propname, const char *string) { const char **values; int nval, ret; nval = fwnode_property_string_array_count(fwnode, propname); if (nval < 0) return nval; if (nval == 0) return -ENODATA; values = kcalloc(nval, sizeof(*values), GFP_KERNEL); if (!values) return -ENOMEM; ret = fwnode_property_read_string_array(fwnode, propname, values, nval); if (ret < 0) goto out_free; ret = match_string(values, nval, string); if (ret < 0) ret = -ENODATA; out_free: kfree(values); return ret; } EXPORT_SYMBOL_GPL(fwnode_property_match_string); /** * fwnode_property_match_property_string - find a property string value in an array and return index * @fwnode: Firmware node to get the property of * @propname: Name of the property holding the string value * @array: String array to search in * @n: Size of the @array * * Find a property string value in a given @array and if it is found return * the index back. * * Return: index, starting from %0, if the string value was found in the @array (success), * %-ENOENT when the string value was not found in the @array, * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO or %-EILSEQ if the property is not a string, * %-ENXIO if no suitable firmware interface is present. */ int fwnode_property_match_property_string(const struct fwnode_handle *fwnode, const char *propname, const char * const *array, size_t n) { const char *string; int ret; ret = fwnode_property_read_string(fwnode, propname, &string); if (ret) return ret; ret = match_string(array, n, string); if (ret < 0) ret = -ENOENT; return ret; } EXPORT_SYMBOL_GPL(fwnode_property_match_property_string); /** * fwnode_property_get_reference_args() - Find a reference with arguments * @fwnode: Firmware node where to look for the reference * @prop: The name of the property * @nargs_prop: The name of the property telling the number of * arguments in the referred node. NULL if @nargs is known, * otherwise @nargs is ignored. Only relevant on OF. * @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL. * @index: Index of the reference, from zero onwards. * @args: Result structure with reference and integer arguments. * May be NULL. * * Obtain a reference based on a named property in an fwnode, with * integer arguments. * * The caller is responsible for calling fwnode_handle_put() on the returned * @args->fwnode pointer. * * Return: %0 on success * %-ENOENT when the index is out of bounds, the index has an empty * reference or the property was not found * %-EINVAL on parse error */ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, const char *prop, const char *nargs_prop, unsigned int nargs, unsigned int index, struct fwnode_reference_args *args) { int ret; if (IS_ERR_OR_NULL(fwnode)) return -ENOENT; ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop, nargs, index, args); if (ret == 0) return ret; if (IS_ERR_OR_NULL(fwnode->secondary)) return ret; return fwnode_call_int_op(fwnode->secondary, get_reference_args, prop, nargs_prop, nargs, index, args); } EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args); /** * fwnode_find_reference - Find named reference to a fwnode_handle * @fwnode: Firmware node where to look for the reference * @name: The name of the reference * @index: Index of the reference * * @index can be used when the named reference holds a table of references. 
 *
 * The caller is responsible for calling fwnode_handle_put() on the returned
 * fwnode pointer.
 *
 * Return: a pointer to the reference fwnode, when found. Otherwise,
 * returns an error pointer.
 */
struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
					    const char *name,
					    unsigned int index)
{
	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(fwnode, name, NULL, 0, index,
						 &args);
	return ret ? ERR_PTR(ret) : args.fwnode;
}
EXPORT_SYMBOL_GPL(fwnode_find_reference);

/**
 * fwnode_get_name - Return the name of a node
 * @fwnode: The firmware node
 *
 * Return: a pointer to the node name, or %NULL.
 */
const char *fwnode_get_name(const struct fwnode_handle *fwnode)
{
	return fwnode_call_ptr_op(fwnode, get_name);
}
EXPORT_SYMBOL_GPL(fwnode_get_name);

/**
 * fwnode_get_name_prefix - Return the prefix of node for printing purposes
 * @fwnode: The firmware node
 *
 * Return: the prefix of a node, intended to be printed right before the node.
 * The prefix also works as a separator between the nodes.
 */
const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
{
	return fwnode_call_ptr_op(fwnode, get_name_prefix);
}

/**
 * fwnode_name_eq - Return true if node name is equal
 * @fwnode: The firmware node
 * @name: The name to which to compare the node name
 *
 * Compare the name provided as an argument to the name of the node, stopping
 * the comparison at either NUL or '@' character, whichever comes first. This
 * function is generally used for comparing node names while ignoring the
 * possible unit address of the node.
 *
 * Return: true if the node name matches with the name provided in the @name
 * argument, false otherwise.
 */
bool fwnode_name_eq(const struct fwnode_handle *fwnode, const char *name)
{
	const char *node_name;
	ptrdiff_t len;

	node_name = fwnode_get_name(fwnode);
	if (!node_name)
		return false;

	len = strchrnul(node_name, '@') - node_name;

	return str_has_prefix(node_name, name) == len;
}
EXPORT_SYMBOL_GPL(fwnode_name_eq);

/**
 * fwnode_get_parent - Return parent firmware node
 * @fwnode: Firmware node whose parent is retrieved
 *
 * The caller is responsible for calling fwnode_handle_put() on the returned
 * fwnode pointer.
 *
 * Return: parent firmware node of the given node if possible or %NULL if no
 * parent was available.
 */
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
{
	return fwnode_call_ptr_op(fwnode, get_parent);
}
EXPORT_SYMBOL_GPL(fwnode_get_parent);

/**
 * fwnode_get_next_parent - Iterate to the node's parent
 * @fwnode: Firmware node whose parent is retrieved
 *
 * This is like fwnode_get_parent() except that it drops the refcount
 * on the passed node, making it suitable for iterating through a
 * node's parents.
 *
 * The caller is responsible for calling fwnode_handle_put() on the returned
 * fwnode pointer. Note that this function also puts a reference to @fwnode
 * unconditionally.
 *
 * Return: parent firmware node of the given node if possible or %NULL if no
 * parent was available.
 */
struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *parent = fwnode_get_parent(fwnode);

	fwnode_handle_put(fwnode);

	return parent;
}
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);

/**
 * fwnode_count_parents - Return the number of parents a node has
 * @fwnode: The node the parents of which are to be counted
 *
 * Return: the number of parents a node has.
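 *
 * Combined with fwnode_get_nth_parent(), the count reaches the topmost node
 * (a sketch; the caller still owns the returned reference and must
 * eventually fwnode_handle_put() it):
 *
 *	unsigned int n = fwnode_count_parents(fwnode);
 *	struct fwnode_handle *root = fwnode_get_nth_parent(fwnode, n);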
*/ unsigned int fwnode_count_parents(const struct fwnode_handle *fwnode) { struct fwnode_handle *parent; unsigned int count = 0; fwnode_for_each_parent_node(fwnode, parent) count++; return count; } EXPORT_SYMBOL_GPL(fwnode_count_parents); /** * fwnode_get_nth_parent - Return an nth parent of a node * @fwnode: The node the parent of which is requested * @depth: Distance of the parent from the node * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. * * Return: the nth parent of a node. If there is no parent at the requested * @depth, %NULL is returned. If @depth is 0, the functionality is equivalent to * fwnode_handle_get(). For @depth == 1, it is fwnode_get_parent() and so on. */ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode, unsigned int depth) { struct fwnode_handle *parent; if (depth == 0) return fwnode_handle_get(fwnode); fwnode_for_each_parent_node(fwnode, parent) { if (--depth == 0) return parent; } return NULL; } EXPORT_SYMBOL_GPL(fwnode_get_nth_parent); /** * fwnode_get_next_child_node - Return the next child node handle for a node * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the node's child nodes or a %NULL handle. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. Note that this function also puts a reference to @child * unconditionally. */ struct fwnode_handle * fwnode_get_next_child_node(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { return fwnode_call_ptr_op(fwnode, get_next_child_node, child); } EXPORT_SYMBOL_GPL(fwnode_get_next_child_node); /** * fwnode_get_next_available_child_node - Return the next available child node handle for a node * @fwnode: Firmware node to find the next child node for. * @child: Handle to one of the node's child nodes or a %NULL handle. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. Note that this function also puts a reference to @child * unconditionally. */ struct fwnode_handle * fwnode_get_next_available_child_node(const struct fwnode_handle *fwnode, struct fwnode_handle *child) { struct fwnode_handle *next_child = child; if (IS_ERR_OR_NULL(fwnode)) return NULL; do { next_child = fwnode_get_next_child_node(fwnode, next_child); if (!next_child) return NULL; } while (!fwnode_device_is_available(next_child)); return next_child; } EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node); /** * device_get_next_child_node - Return the next child node handle for a device * @dev: Device to find the next child node for. * @child: Handle to one of the device's child nodes or a %NULL handle. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. Note that this function also puts a reference to @child * unconditionally. */ struct fwnode_handle *device_get_next_child_node(const struct device *dev, struct fwnode_handle *child) { const struct fwnode_handle *fwnode = dev_fwnode(dev); struct fwnode_handle *next; if (IS_ERR_OR_NULL(fwnode)) return NULL; /* Try to find a child in primary fwnode */ next = fwnode_get_next_child_node(fwnode, child); if (next) return next; /* When no more children in primary, continue with secondary */ return fwnode_get_next_child_node(fwnode->secondary, child); } EXPORT_SYMBOL_GPL(device_get_next_child_node); /** * fwnode_get_named_child_node - Return first matching named child node handle * @fwnode: Firmware node to find the named child node for. 
* @childname: String to match child node name against. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. */ struct fwnode_handle * fwnode_get_named_child_node(const struct fwnode_handle *fwnode, const char *childname) { return fwnode_call_ptr_op(fwnode, get_named_child_node, childname); } EXPORT_SYMBOL_GPL(fwnode_get_named_child_node); /** * device_get_named_child_node - Return first matching named child node handle * @dev: Device to find the named child node for. * @childname: String to match child node name against. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. */ struct fwnode_handle *device_get_named_child_node(const struct device *dev, const char *childname) { return fwnode_get_named_child_node(dev_fwnode(dev), childname); } EXPORT_SYMBOL_GPL(device_get_named_child_node); /** * fwnode_handle_get - Obtain a reference to a device node * @fwnode: Pointer to the device node to obtain the reference to. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. * * Return: the fwnode handle. */ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode) { if (!fwnode_has_op(fwnode, get)) return fwnode; return fwnode_call_ptr_op(fwnode, get); } EXPORT_SYMBOL_GPL(fwnode_handle_get); /** * fwnode_device_is_available - check if a device is available for use * @fwnode: Pointer to the fwnode of the device. * * Return: true if device is available for use. Otherwise, returns false. * * For fwnode node types that don't implement the .device_is_available() * operation, this function returns true. */ bool fwnode_device_is_available(const struct fwnode_handle *fwnode) { if (IS_ERR_OR_NULL(fwnode)) return false; if (!fwnode_has_op(fwnode, device_is_available)) return true; return fwnode_call_bool_op(fwnode, device_is_available); } EXPORT_SYMBOL_GPL(fwnode_device_is_available); /** * device_get_child_node_count - return the number of child nodes for device * @dev: Device to count the child nodes for * * Return: the number of child nodes for a given device. */ unsigned int device_get_child_node_count(const struct device *dev) { struct fwnode_handle *child; unsigned int count = 0; device_for_each_child_node(dev, child) count++; return count; } EXPORT_SYMBOL_GPL(device_get_child_node_count); bool device_dma_supported(const struct device *dev) { return fwnode_call_bool_op(dev_fwnode(dev), device_dma_supported); } EXPORT_SYMBOL_GPL(device_dma_supported); enum dev_dma_attr device_get_dma_attr(const struct device *dev) { if (!fwnode_has_op(dev_fwnode(dev), device_get_dma_attr)) return DEV_DMA_NOT_SUPPORTED; return fwnode_call_int_op(dev_fwnode(dev), device_get_dma_attr); } EXPORT_SYMBOL_GPL(device_get_dma_attr); /** * fwnode_get_phy_mode - Get phy mode for given firmware node * @fwnode: Pointer to the given node * * The function gets phy interface string from property 'phy-mode' or * 'phy-connection-type', and return its index in phy_modes table, or errno in * error case. 
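 *
 * An illustrative use in an Ethernet driver probe path, falling back to
 * PHY_INTERFACE_MODE_NA as a driver-specific policy (sketch):
 *
 *	int mode = fwnode_get_phy_mode(fwnode);
 *
 *	if (mode < 0)
 *		mode = PHY_INTERFACE_MODE_NA;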
*/ int fwnode_get_phy_mode(const struct fwnode_handle *fwnode) { const char *pm; int err, i; err = fwnode_property_read_string(fwnode, "phy-mode", &pm); if (err < 0) err = fwnode_property_read_string(fwnode, "phy-connection-type", &pm); if (err < 0) return err; for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) if (!strcasecmp(pm, phy_modes(i))) return i; return -ENODEV; } EXPORT_SYMBOL_GPL(fwnode_get_phy_mode); /** * device_get_phy_mode - Get phy mode for given device * @dev: Pointer to the given device * * The function gets phy interface string from property 'phy-mode' or * 'phy-connection-type', and return its index in phy_modes table, or errno in * error case. */ int device_get_phy_mode(struct device *dev) { return fwnode_get_phy_mode(dev_fwnode(dev)); } EXPORT_SYMBOL_GPL(device_get_phy_mode); /** * fwnode_iomap - Maps the memory mapped IO for a given fwnode * @fwnode: Pointer to the firmware node * @index: Index of the IO range * * Return: a pointer to the mapped memory. */ void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index) { return fwnode_call_ptr_op(fwnode, iomap, index); } EXPORT_SYMBOL(fwnode_iomap); /** * fwnode_irq_get - Get IRQ directly from a fwnode * @fwnode: Pointer to the firmware node * @index: Zero-based index of the IRQ * * Return: Linux IRQ number on success. Negative errno on failure. */ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index) { int ret; ret = fwnode_call_int_op(fwnode, irq_get, index); /* We treat mapping errors as invalid case */ if (ret == 0) return -EINVAL; return ret; } EXPORT_SYMBOL(fwnode_irq_get); /** * fwnode_irq_get_byname - Get IRQ from a fwnode using its name * @fwnode: Pointer to the firmware node * @name: IRQ name * * Description: * Find a match to the string @name in the 'interrupt-names' string array * in _DSD for ACPI, or of_node for Device Tree. Then get the Linux IRQ * number of the IRQ resource corresponding to the index of the matched * string. * * Return: Linux IRQ number on success, or negative errno otherwise. */ int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name) { int index; if (!name) return -EINVAL; index = fwnode_property_match_string(fwnode, "interrupt-names", name); if (index < 0) return index; return fwnode_irq_get(fwnode, index); } EXPORT_SYMBOL(fwnode_irq_get_byname); /** * fwnode_graph_get_next_endpoint - Get next endpoint firmware node * @fwnode: Pointer to the parent firmware node * @prev: Previous endpoint node or %NULL to get the first * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. Note that this function also puts a reference to @prev * unconditionally. * * Return: an endpoint firmware node pointer or %NULL if no more endpoints * are available. */ struct fwnode_handle * fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode, struct fwnode_handle *prev) { struct fwnode_handle *ep, *port_parent = NULL; const struct fwnode_handle *parent; /* * If this function is in a loop and the previous iteration returned * an endpoint from fwnode->secondary, then we need to use the secondary * as parent rather than @fwnode. 
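	 * When the primary @parent then runs out of endpoints, the
	 * recursive call below restarts the walk on parent->secondary
	 * (with a NULL @prev).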
*/ if (prev) { port_parent = fwnode_graph_get_port_parent(prev); parent = port_parent; } else { parent = fwnode; } if (IS_ERR_OR_NULL(parent)) return NULL; ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev); if (ep) goto out_put_port_parent; ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL); out_put_port_parent: fwnode_handle_put(port_parent); return ep; } EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); /** * fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint * @endpoint: Endpoint firmware node of the port * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. * * Return: the firmware node of the device the @endpoint belongs to. */ struct fwnode_handle * fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint) { struct fwnode_handle *port, *parent; port = fwnode_get_parent(endpoint); parent = fwnode_call_ptr_op(port, graph_get_port_parent); fwnode_handle_put(port); return parent; } EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent); /** * fwnode_graph_get_remote_port_parent - Return fwnode of a remote device * @fwnode: Endpoint firmware node pointing to the remote endpoint * * Extracts firmware node of a remote device the @fwnode points to. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. */ struct fwnode_handle * fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode) { struct fwnode_handle *endpoint, *parent; endpoint = fwnode_graph_get_remote_endpoint(fwnode); parent = fwnode_graph_get_port_parent(endpoint); fwnode_handle_put(endpoint); return parent; } EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent); /** * fwnode_graph_get_remote_port - Return fwnode of a remote port * @fwnode: Endpoint firmware node pointing to the remote endpoint * * Extracts firmware node of a remote port the @fwnode points to. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. */ struct fwnode_handle * fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode) { return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode)); } EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port); /** * fwnode_graph_get_remote_endpoint - Return fwnode of a remote endpoint * @fwnode: Endpoint firmware node pointing to the remote endpoint * * Extracts firmware node of a remote endpoint the @fwnode points to. * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. */ struct fwnode_handle * fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode) { return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint); } EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint); static bool fwnode_graph_remote_available(struct fwnode_handle *ep) { struct fwnode_handle *dev_node; bool available; dev_node = fwnode_graph_get_remote_port_parent(ep); available = fwnode_device_is_available(dev_node); fwnode_handle_put(dev_node); return available; } /** * fwnode_graph_get_endpoint_by_id - get endpoint by port and endpoint numbers * @fwnode: parent fwnode_handle containing the graph * @port: identifier of the port node * @endpoint: identifier of the endpoint node under the port node * @flags: fwnode lookup flags * * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. * * Return: the fwnode handle of the local endpoint corresponding the port and * endpoint IDs or %NULL if not found. 
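 *
 * For instance, fetching endpoint 0 of port 1 (illustrative):
 *
 *	struct fwnode_handle *ep;
 *
 *	ep = fwnode_graph_get_endpoint_by_id(fwnode, 1, 0, 0);
 *	if (!ep)
 *		return -ENODEV;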
* * If FWNODE_GRAPH_ENDPOINT_NEXT is passed in @flags and the specified endpoint * has not been found, look for the closest endpoint ID greater than the * specified one and return the endpoint that corresponds to it, if present. * * Does not return endpoints that belong to disabled devices or endpoints that * are unconnected, unless FWNODE_GRAPH_DEVICE_DISABLED is passed in @flags. */ struct fwnode_handle * fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode, u32 port, u32 endpoint, unsigned long flags) { struct fwnode_handle *ep, *best_ep = NULL; unsigned int best_ep_id = 0; bool endpoint_next = flags & FWNODE_GRAPH_ENDPOINT_NEXT; bool enabled_only = !(flags & FWNODE_GRAPH_DEVICE_DISABLED); fwnode_graph_for_each_endpoint(fwnode, ep) { struct fwnode_endpoint fwnode_ep = { 0 }; int ret; if (enabled_only && !fwnode_graph_remote_available(ep)) continue; ret = fwnode_graph_parse_endpoint(ep, &fwnode_ep); if (ret < 0) continue; if (fwnode_ep.port != port) continue; if (fwnode_ep.id == endpoint) return ep; if (!endpoint_next) continue; /* * If the endpoint that has just been found is not the first * matching one and the ID of the one found previously is closer * to the requested endpoint ID, skip it. */ if (fwnode_ep.id < endpoint || (best_ep && best_ep_id < fwnode_ep.id)) continue; fwnode_handle_put(best_ep); best_ep = fwnode_handle_get(ep); best_ep_id = fwnode_ep.id; } return best_ep; } EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_by_id); /** * fwnode_graph_get_endpoint_count - Count endpoints on a device node * @fwnode: The node related to a device * @flags: fwnode lookup flags * Count endpoints in a device node. * * If FWNODE_GRAPH_DEVICE_DISABLED flag is specified, also unconnected endpoints * and endpoints connected to disabled devices are counted. */ unsigned int fwnode_graph_get_endpoint_count(const struct fwnode_handle *fwnode, unsigned long flags) { struct fwnode_handle *ep; unsigned int count = 0; fwnode_graph_for_each_endpoint(fwnode, ep) { if (flags & FWNODE_GRAPH_DEVICE_DISABLED || fwnode_graph_remote_available(ep)) count++; } return count; } EXPORT_SYMBOL_GPL(fwnode_graph_get_endpoint_count); /** * fwnode_graph_parse_endpoint - parse common endpoint node properties * @fwnode: pointer to endpoint fwnode_handle * @endpoint: pointer to the fwnode endpoint data structure * * Parse @fwnode representing a graph endpoint node and store the * information in @endpoint. The caller must hold a reference to * @fwnode. 
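 *
 * Typical use while iterating, where the iterator holds the reference
 * (sketch):
 *
 *	struct fwnode_endpoint fwep;
 *
 *	fwnode_graph_for_each_endpoint(fwnode, ep) {
 *		if (fwnode_graph_parse_endpoint(ep, &fwep))
 *			continue;
 *		...
 *	}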
*/ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint) { memset(endpoint, 0, sizeof(*endpoint)); return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint); } EXPORT_SYMBOL(fwnode_graph_parse_endpoint); const void *device_get_match_data(const struct device *dev) { return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev); } EXPORT_SYMBOL_GPL(device_get_match_data); static unsigned int fwnode_graph_devcon_matches(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match, void **matches, unsigned int matches_len) { struct fwnode_handle *node; struct fwnode_handle *ep; unsigned int count = 0; void *ret; fwnode_graph_for_each_endpoint(fwnode, ep) { if (matches && count >= matches_len) { fwnode_handle_put(ep); break; } node = fwnode_graph_get_remote_port_parent(ep); if (!fwnode_device_is_available(node)) { fwnode_handle_put(node); continue; } ret = match(node, con_id, data); fwnode_handle_put(node); if (ret) { if (matches) matches[count] = ret; count++; } } return count; } static unsigned int fwnode_devcon_matches(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match, void **matches, unsigned int matches_len) { struct fwnode_handle *node; unsigned int count = 0; unsigned int i; void *ret; for (i = 0; ; i++) { if (matches && count >= matches_len) break; node = fwnode_find_reference(fwnode, con_id, i); if (IS_ERR(node)) break; ret = match(node, NULL, data); fwnode_handle_put(node); if (ret) { if (matches) matches[count] = ret; count++; } } return count; } /** * fwnode_connection_find_match - Find connection from a device node * @fwnode: Device node with the connection * @con_id: Identifier for the connection * @data: Data for the match function * @match: Function to check and convert the connection description * * Find a connection with unique identifier @con_id between @fwnode and another * device node. @match will be used to convert the connection description to * data the caller is expecting to be returned. */ void *fwnode_connection_find_match(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match) { unsigned int count; void *ret; if (!fwnode || !match) return NULL; count = fwnode_graph_devcon_matches(fwnode, con_id, data, match, &ret, 1); if (count) return ret; count = fwnode_devcon_matches(fwnode, con_id, data, match, &ret, 1); return count ? ret : NULL; } EXPORT_SYMBOL_GPL(fwnode_connection_find_match); /** * fwnode_connection_find_matches - Find connections from a device node * @fwnode: Device node with the connection * @con_id: Identifier for the connection * @data: Data for the match function * @match: Function to check and convert the connection description * @matches: (Optional) array of pointers to fill with matches * @matches_len: Length of @matches * * Find up to @matches_len connections with unique identifier @con_id between * @fwnode and other device nodes. @match will be used to convert the * connection description to data the caller is expecting to be returned * through the @matches array. * * If @matches is %NULL @matches_len is ignored and the total number of resolved * matches is returned. * * Return: Number of matches resolved, or negative errno. 
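 *
 * A count-then-collect pattern is one way to size @matches (illustrative;
 * my_match() stands in for a caller-provided devcon_match_fn_t):
 *
 *	int n = fwnode_connection_find_matches(fwnode, "mode-switch",
 *					       NULL, my_match, NULL, 0);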
*/ int fwnode_connection_find_matches(const struct fwnode_handle *fwnode, const char *con_id, void *data, devcon_match_fn_t match, void **matches, unsigned int matches_len) { unsigned int count_graph; unsigned int count_ref; if (!fwnode || !match) return -EINVAL; count_graph = fwnode_graph_devcon_matches(fwnode, con_id, data, match, matches, matches_len); if (matches) { matches += count_graph; matches_len -= count_graph; } count_ref = fwnode_devcon_matches(fwnode, con_id, data, match, matches, matches_len); return count_graph + count_ref; } EXPORT_SYMBOL_GPL(fwnode_connection_find_matches);
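
/*
 * A minimal, self-contained sketch of the property API above as a consumer
 * would use it from a driver probe path. The property names ("label",
 * "reg-count") and the my_probe() function are hypothetical, chosen only
 * for illustration; this block is not part of the original file.
 */
#if 0	/* illustrative only */
static int my_probe(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	const char *label;
	u32 count;
	int ret;

	/* Strings are returned as pointers into the firmware data. */
	ret = fwnode_property_read_string(fwnode, "label", &label);
	if (ret)
		return ret;

	/* Integer reads copy the value out; here a single u32. */
	ret = fwnode_property_read_u32(fwnode, "reg-count", &count);
	if (ret)
		return ret;

	dev_info(dev, "%s: %u regs\n", label, count);
	return 0;
}
#endif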
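/*
 * A sketch of the graph helpers above: visit the device on the remote end
 * of each connected endpoint. Hypothetical example, not part of the
 * original file; error handling is compressed for brevity.
 */
#if 0	/* illustrative only */
static void my_walk_endpoints(struct fwnode_handle *fwnode)
{
	struct fwnode_handle *ep, *remote;

	fwnode_graph_for_each_endpoint(fwnode, ep) {
		remote = fwnode_graph_get_remote_port_parent(ep);
		if (!fwnode_device_is_available(remote)) {
			fwnode_handle_put(remote);
			continue;
		}
		/* ... use @remote here ... */
		fwnode_handle_put(remote);
	}
}
#endif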
/*
 * af_llc.c - LLC User Interface SAPs
 * Description:
 *   Functions in this module are implementation of socket based llc
 *   communications for the Linux operating system. Support of llc class
 *   one and class two is provided via SOCK_DGRAM and SOCK_STREAM
 *   respectively.
 *
 *   An llc2 connection is (mac + sap), only one llc2 sap connection
 *   is allowed per mac. Though one sap may have multiple mac + sap
 *   connections.
 *
 * Copyright (c) 2001 by Jay Schulist <jschlst@samba.org>
 *		 2002-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program can be redistributed or modified under the terms of the
 * GNU General Public License as published by the Free Software Foundation.
 * This program is distributed without any warranty or implied warranty
 * of merchantability or fitness for a particular purpose.
 *
 * See the GNU General Public License for more details.
*/ #include <linux/compiler.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/sched/signal.h> #include <net/llc.h> #include <net/llc_sap.h> #include <net/llc_pdu.h> #include <net/llc_conn.h> #include <net/tcp_states.h> /* remember: uninitialized global data is zeroed because its in .bss */ static u16 llc_ui_sap_last_autoport = LLC_SAP_DYN_START; static u16 llc_ui_sap_link_no_max[256]; static struct sockaddr_llc llc_ui_addrnull; static const struct proto_ops llc_ui_ops; static bool llc_ui_wait_for_conn(struct sock *sk, long timeout); static int llc_ui_wait_for_disc(struct sock *sk, long timeout); static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout); #if 0 #define dprintk(args...) printk(KERN_DEBUG args) #else #define dprintk(args...) do {} while (0) #endif /* Maybe we'll add some more in the future. */ #define LLC_CMSG_PKTINFO 1 /** * llc_ui_next_link_no - return the next unused link number for a sap * @sap: Address of sap to get link number from. * * Return the next unused link number for a given sap. */ static inline u16 llc_ui_next_link_no(int sap) { return llc_ui_sap_link_no_max[sap]++; } /** * llc_proto_type - return eth protocol for ARP header type * @arphrd: ARP header type. * * Given an ARP header type return the corresponding ethernet protocol. */ static inline __be16 llc_proto_type(u16 arphrd) { return htons(ETH_P_802_2); } /** * llc_ui_addr_null - determines if a address structure is null * @addr: Address to test if null. */ static inline u8 llc_ui_addr_null(struct sockaddr_llc *addr) { return !memcmp(addr, &llc_ui_addrnull, sizeof(*addr)); } /** * llc_ui_header_len - return length of llc header based on operation * @sk: Socket which contains a valid llc socket type. * @addr: Complete sockaddr_llc structure received from the user. * * Provide the length of the llc header depending on what kind of * operation the user would like to perform and the type of socket. * Returns the correct llc header length. */ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr) { u8 rc = LLC_PDU_LEN_U; if (addr->sllc_test) rc = LLC_PDU_LEN_U; else if (addr->sllc_xid) /* We need to expand header to sizeof(struct llc_xid_info) * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header * as XID PDU. In llc_ui_sendmsg() we reserved header size and then * filled all other space with user data. If we won't reserve this * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data */ rc = LLC_PDU_LEN_U_XID; else if (sk->sk_type == SOCK_STREAM) rc = LLC_PDU_LEN_I; return rc; } /** * llc_ui_send_data - send data via reliable llc2 connection * @sk: Connection the socket is using. * @skb: Data the user wishes to send. * @noblock: can we block waiting for data? * * Send data via reliable llc2 connection. * Returns 0 upon success, non-zero if action did not succeed. * * This function always consumes a reference to the skb. 
*/ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock) { struct llc_sock* llc = llc_sk(sk); if (unlikely(llc_data_accept_state(llc->state) || llc->remote_busy_flag || llc->p_flag)) { long timeout = sock_sndtimeo(sk, noblock); int rc; rc = llc_ui_wait_for_busy_core(sk, timeout); if (rc) { kfree_skb(skb); return rc; } } return llc_build_and_send_pkt(sk, skb); } static void llc_ui_sk_init(struct socket *sock, struct sock *sk) { sock_graft(sk, sock); sk->sk_type = sock->type; sock->ops = &llc_ui_ops; } static struct proto llc_proto = { .name = "LLC", .owner = THIS_MODULE, .obj_size = sizeof(struct llc_sock), .slab_flags = SLAB_TYPESAFE_BY_RCU, }; /** * llc_ui_create - alloc and init a new llc_ui socket * @net: network namespace (must be default network) * @sock: Socket to initialize and attach allocated sk to. * @protocol: Unused. * @kern: on behalf of kernel or userspace * * Allocate and initialize a new llc_ui socket, validate the user wants a * socket type we have available. * Returns 0 upon success, negative upon failure. */ static int llc_ui_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; int rc = -ESOCKTNOSUPPORT; if (!ns_capable(net->user_ns, CAP_NET_RAW)) return -EPERM; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; if (likely(sock->type == SOCK_DGRAM || sock->type == SOCK_STREAM)) { rc = -ENOMEM; sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto, kern); if (sk) { rc = 0; llc_ui_sk_init(sock, sk); } } return rc; } /** * llc_ui_release - shutdown socket * @sock: Socket to release. * * Shutdown and deallocate an existing socket. */ static int llc_ui_release(struct socket *sock) { struct sock *sk = sock->sk; struct llc_sock *llc; if (unlikely(sk == NULL)) goto out; sock_hold(sk); lock_sock(sk); llc = llc_sk(sk); dprintk("%s: closing local(%02X) remote(%02X)\n", __func__, llc->laddr.lsap, llc->daddr.lsap); if (!llc_send_disc(sk)) llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); if (!sock_flag(sk, SOCK_ZAPPED)) { struct llc_sap *sap = llc->sap; /* Hold this for release_sock(), so that llc_backlog_rcv() * could still use it. */ llc_sap_hold(sap); llc_sap_remove_socket(llc->sap, sk); release_sock(sk); llc_sap_put(sap); } else { release_sock(sk); } netdev_put(llc->dev, &llc->dev_tracker); sock_put(sk); sock_orphan(sk); sock->sk = NULL; llc_sk_free(sk); out: return 0; } /** * llc_ui_autoport - provide dynamically allocate SAP number * * Provide the caller with a dynamically allocated SAP number according * to the rules that are set in this function. Returns: 0, upon failure, * SAP number otherwise. */ static int llc_ui_autoport(void) { struct llc_sap *sap; int i, tries = 0; while (tries < LLC_SAP_DYN_TRIES) { for (i = llc_ui_sap_last_autoport; i < LLC_SAP_DYN_STOP; i += 2) { sap = llc_sap_find(i); if (!sap) { llc_ui_sap_last_autoport = i + 2; goto out; } llc_sap_put(sap); } llc_ui_sap_last_autoport = LLC_SAP_DYN_START; tries++; } i = 0; out: return i; } /** * llc_ui_autobind - automatically bind a socket to a sap * @sock: socket to bind * @addr: address to connect to * * Used by llc_ui_connect and llc_ui_sendmsg when the user hasn't * specifically used llc_ui_bind to bind to an specific address/sap * * Returns: 0 upon success, negative otherwise. 
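 *
 * The SAP itself comes from llc_ui_autoport(), which scans the dynamic
 * range (LLC_SAP_DYN_START..LLC_SAP_DYN_STOP, in steps of two) for a SAP
 * that no other network layer currently has open.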
*/ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); struct net_device *dev = NULL; struct llc_sap *sap; int rc = -EINVAL; if (!sock_flag(sk, SOCK_ZAPPED)) goto out; if (!addr->sllc_arphrd) addr->sllc_arphrd = ARPHRD_ETHER; if (addr->sllc_arphrd != ARPHRD_ETHER) goto out; rc = -ENODEV; if (sk->sk_bound_dev_if) { dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); if (dev && addr->sllc_arphrd != dev->type) { dev_put(dev); dev = NULL; } } else dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd); if (!dev) goto out; rc = -EUSERS; llc->laddr.lsap = llc_ui_autoport(); if (!llc->laddr.lsap) goto out; rc = -EBUSY; /* some other network layer is using the sap */ sap = llc_sap_open(llc->laddr.lsap, NULL); if (!sap) goto out; /* Note: We do not expect errors from this point. */ llc->dev = dev; netdev_tracker_alloc(llc->dev, &llc->dev_tracker, GFP_KERNEL); dev = NULL; memcpy(llc->laddr.mac, llc->dev->dev_addr, IFHWADDRLEN); memcpy(&llc->addr, addr, sizeof(llc->addr)); /* assign new connection to its SAP */ llc_sap_add_socket(sap, sk); sock_reset_flag(sk, SOCK_ZAPPED); rc = 0; out: dev_put(dev); return rc; } /** * llc_ui_bind - bind a socket to a specific address. * @sock: Socket to bind an address to. * @uaddr: Address the user wants the socket bound to. * @addrlen: Length of the uaddr structure. * * Bind a socket to a specific address. For llc a user is able to bind to * a specific sap only or mac + sap. * If the user desires to bind to a specific mac + sap, it is possible to * have multiple sap connections via multiple macs. * Bind and autobind for that matter must enforce the correct sap usage * otherwise all hell will break loose. * Returns: 0 upon success, negative otherwise. */ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) { struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); struct net_device *dev = NULL; struct llc_sap *sap; int rc = -EINVAL; lock_sock(sk); if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; if (!addr->sllc_arphrd) addr->sllc_arphrd = ARPHRD_ETHER; if (unlikely(addr->sllc_family != AF_LLC || addr->sllc_arphrd != ARPHRD_ETHER)) goto out; dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); rc = -ENODEV; rcu_read_lock(); if (sk->sk_bound_dev_if) { dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); if (dev) { if (is_zero_ether_addr(addr->sllc_mac)) memcpy(addr->sllc_mac, dev->dev_addr, IFHWADDRLEN); if (addr->sllc_arphrd != dev->type || !ether_addr_equal(addr->sllc_mac, dev->dev_addr)) { rc = -EINVAL; dev = NULL; } } } else { dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd, addr->sllc_mac); } dev_hold(dev); rcu_read_unlock(); if (!dev) goto out; if (!addr->sllc_sap) { rc = -EUSERS; addr->sllc_sap = llc_ui_autoport(); if (!addr->sllc_sap) goto out; } sap = llc_sap_find(addr->sllc_sap); if (!sap) { sap = llc_sap_open(addr->sllc_sap, NULL); rc = -EBUSY; /* some other network layer is using the sap */ if (!sap) goto out; } else { struct llc_addr laddr, daddr; struct sock *ask; memset(&laddr, 0, sizeof(laddr)); memset(&daddr, 0, sizeof(daddr)); /* * FIXME: check if the address is multicast, * only SOCK_DGRAM can do this. */ memcpy(laddr.mac, addr->sllc_mac, IFHWADDRLEN); laddr.lsap = addr->sllc_sap; rc = -EADDRINUSE; /* mac + sap clash. 
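	 * llc_lookup_established() below tells us whether another socket
	 * already owns this exact mac + sap pair.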
*/ ask = llc_lookup_established(sap, &daddr, &laddr, &init_net); if (ask) { sock_put(ask); goto out_put; } } /* Note: We do not expect errors from this point. */ llc->dev = dev; netdev_tracker_alloc(llc->dev, &llc->dev_tracker, GFP_KERNEL); dev = NULL; llc->laddr.lsap = addr->sllc_sap; memcpy(llc->laddr.mac, addr->sllc_mac, IFHWADDRLEN); memcpy(&llc->addr, addr, sizeof(llc->addr)); /* assign new connection to its SAP */ llc_sap_add_socket(sap, sk); sock_reset_flag(sk, SOCK_ZAPPED); rc = 0; out_put: llc_sap_put(sap); out: dev_put(dev); release_sock(sk); return rc; } /** * llc_ui_shutdown - shutdown a connect llc2 socket. * @sock: Socket to shutdown. * @how: What part of the socket to shutdown. * * Shutdown a connected llc2 socket. Currently this function only supports * shutting down both sends and receives (2), we could probably make this * function such that a user can shutdown only half the connection but not * right now. * Returns: 0 upon success, negative otherwise. */ static int llc_ui_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; int rc = -ENOTCONN; lock_sock(sk); if (unlikely(sk->sk_state != TCP_ESTABLISHED)) goto out; rc = -EINVAL; if (how != 2) goto out; rc = llc_send_disc(sk); if (!rc) rc = llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); /* Wake up anyone sleeping in poll */ sk->sk_state_change(sk); out: release_sock(sk); return rc; } /** * llc_ui_connect - Connect to a remote llc2 mac + sap. * @sock: Socket which will be connected to the remote destination. * @uaddr: Remote and possibly the local address of the new connection. * @addrlen: Size of uaddr structure. * @flags: Operational flags specified by the user. * * Connect to a remote llc2 mac + sap. The caller must specify the * destination mac and address to connect to. If the user hasn't previously * called bind(2) with a smac the address of the first interface of the * specified arp type will be used. * This function will autobind if user did not previously call bind. * Returns: 0 upon success, negative otherwise. */ static int llc_ui_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); struct sockaddr_llc *addr = (struct sockaddr_llc *)uaddr; int rc = -EINVAL; lock_sock(sk); if (unlikely(addrlen != sizeof(*addr))) goto out; rc = -EAFNOSUPPORT; if (unlikely(addr->sllc_family != AF_LLC)) goto out; if (unlikely(sk->sk_type != SOCK_STREAM)) goto out; rc = -EALREADY; if (unlikely(sock->state == SS_CONNECTING)) goto out; /* bind connection to sap if user hasn't done it. */ if (sock_flag(sk, SOCK_ZAPPED)) { /* bind to sap with null dev, exclusive */ rc = llc_ui_autobind(sock, addr); if (rc) goto out; } llc->daddr.lsap = addr->sllc_sap; memcpy(llc->daddr.mac, addr->sllc_mac, IFHWADDRLEN); sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; llc->link = llc_ui_next_link_no(llc->sap->laddr.lsap); rc = llc_establish_connection(sk, llc->dev->dev_addr, addr->sllc_mac, addr->sllc_sap); if (rc) { dprintk("%s: llc_ui_send_conn failed :-(\n", __func__); sock->state = SS_UNCONNECTED; sk->sk_state = TCP_CLOSE; goto out; } if (sk->sk_state == TCP_SYN_SENT) { const long timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); if (!timeo || !llc_ui_wait_for_conn(sk, timeo)) goto out; rc = sock_intr_errno(timeo); if (signal_pending(current)) goto out; } if (sk->sk_state == TCP_CLOSE) goto sock_error; sock->state = SS_CONNECTED; rc = 0; out: release_sock(sk); return rc; sock_error: rc = sock_error(sk) ? 
: -ECONNABORTED; sock->state = SS_UNCONNECTED; goto out; } /** * llc_ui_listen - allow a normal socket to accept incoming connections * @sock: Socket to allow incoming connections on. * @backlog: Number of connections to queue. * * Allow a normal socket to accept incoming connections. * Returns 0 upon success, negative otherwise. */ static int llc_ui_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int rc = -EINVAL; lock_sock(sk); if (unlikely(sock->state != SS_UNCONNECTED)) goto out; rc = -EOPNOTSUPP; if (unlikely(sk->sk_type != SOCK_STREAM)) goto out; rc = -EAGAIN; if (sock_flag(sk, SOCK_ZAPPED)) goto out; rc = 0; if (!(unsigned int)backlog) /* BSDism */ backlog = 1; sk->sk_max_ack_backlog = backlog; if (sk->sk_state != TCP_LISTEN) { sk->sk_ack_backlog = 0; sk->sk_state = TCP_LISTEN; } sk->sk_socket->flags |= __SO_ACCEPTCON; out: release_sock(sk); return rc; } static int llc_ui_wait_for_disc(struct sock *sk, long timeout) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int rc = 0; add_wait_queue(sk_sleep(sk), &wait); while (1) { if (sk_wait_event(sk, &timeout, READ_ONCE(sk->sk_state) == TCP_CLOSE, &wait)) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; rc = 0; } remove_wait_queue(sk_sleep(sk), &wait); return rc; } static bool llc_ui_wait_for_conn(struct sock *sk, long timeout) { DEFINE_WAIT_FUNC(wait, woken_wake_function); add_wait_queue(sk_sleep(sk), &wait); while (1) { if (sk_wait_event(sk, &timeout, READ_ONCE(sk->sk_state) != TCP_SYN_SENT, &wait)) break; if (signal_pending(current) || !timeout) break; } remove_wait_queue(sk_sleep(sk), &wait); return timeout; } static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct llc_sock *llc = llc_sk(sk); int rc; add_wait_queue(sk_sleep(sk), &wait); while (1) { rc = 0; if (sk_wait_event(sk, &timeout, (READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN) || (!llc_data_accept_state(llc->state) && !llc->remote_busy_flag && !llc->p_flag), &wait)) break; rc = -ERESTARTSYS; if (signal_pending(current)) break; rc = -EAGAIN; if (!timeout) break; } remove_wait_queue(sk_sleep(sk), &wait); return rc; } static int llc_wait_data(struct sock *sk, long timeo) { int rc; while (1) { /* * POSIX 1003.1g mandates this order. */ rc = sock_error(sk); if (rc) break; rc = 0; if (sk->sk_shutdown & RCV_SHUTDOWN) break; rc = -EAGAIN; if (!timeo) break; rc = sock_intr_errno(timeo); if (signal_pending(current)) break; rc = 0; if (sk_wait_data(sk, &timeo, NULL)) break; } return rc; } static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(skb->sk); if (llc->cmsg_flags & LLC_CMSG_PKTINFO) { struct llc_pktinfo info; memset(&info, 0, sizeof(info)); info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex; llc_pdu_decode_dsap(skb, &info.lpi_sap); llc_pdu_decode_da(skb, info.lpi_mac); put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info); } } /** * llc_ui_accept - accept a new incoming connection. * @sock: Socket which connections arrive on. * @newsock: Socket to move incoming connection to. * @arg: User specified arguments * * Accept a new incoming connection. * Returns 0 upon success, negative otherwise. 
*/ static int llc_ui_accept(struct socket *sock, struct socket *newsock, struct proto_accept_arg *arg) { struct sock *sk = sock->sk, *newsk; struct llc_sock *llc, *newllc; struct sk_buff *skb; int rc = -EOPNOTSUPP; dprintk("%s: accepting on %02X\n", __func__, llc_sk(sk)->laddr.lsap); lock_sock(sk); if (unlikely(sk->sk_type != SOCK_STREAM)) goto out; rc = -EINVAL; if (unlikely(sock->state != SS_UNCONNECTED || sk->sk_state != TCP_LISTEN)) goto out; /* wait for a connection to arrive. */ if (skb_queue_empty(&sk->sk_receive_queue)) { rc = llc_wait_data(sk, sk->sk_rcvtimeo); if (rc) goto out; } dprintk("%s: got a new connection on %02X\n", __func__, llc_sk(sk)->laddr.lsap); skb = skb_dequeue(&sk->sk_receive_queue); rc = -EINVAL; if (!skb->sk) goto frees; rc = 0; newsk = skb->sk; /* attach connection to a new socket. */ llc_ui_sk_init(newsock, newsk); sock_reset_flag(newsk, SOCK_ZAPPED); newsk->sk_state = TCP_ESTABLISHED; newsock->state = SS_CONNECTED; llc = llc_sk(sk); newllc = llc_sk(newsk); memcpy(&newllc->addr, &llc->addr, sizeof(newllc->addr)); newllc->link = llc_ui_next_link_no(newllc->laddr.lsap); /* put original socket back into a clean listen state. */ sk->sk_state = TCP_LISTEN; sk_acceptq_removed(sk); dprintk("%s: ok success on %02X, client on %02X\n", __func__, llc_sk(sk)->addr.sllc_sap, newllc->daddr.lsap); frees: kfree_skb(skb); out: release_sock(sk); return rc; } /** * llc_ui_recvmsg - copy received data to the socket user. * @sock: Socket to copy data from. * @msg: Various user space related information. * @len: Size of user buffer. * @flags: User specified flags. * * Copy received data to the socket user. * Returns non-negative upon success, negative otherwise. */ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { DECLARE_SOCKADDR(struct sockaddr_llc *, uaddr, msg->msg_name); const int nonblock = flags & MSG_DONTWAIT; struct sk_buff *skb = NULL; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); size_t copied = 0; u32 peek_seq = 0; u32 *seq, skb_len; unsigned long used; int target; /* Read at least this many bytes */ long timeo; lock_sock(sk); copied = -ENOTCONN; if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) goto out; timeo = sock_rcvtimeo(sk, nonblock); seq = &llc->copied_seq; if (flags & MSG_PEEK) { peek_seq = llc->copied_seq; seq = &peek_seq; } target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); copied = 0; do { u32 offset; /* * We need to check signals first, to get correct SIGURG * handling. FIXME: Need to check this doesn't impact 1003.1g * and move it down to the bottom of the loop */ if (signal_pending(current)) { if (copied) break; copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; break; } /* Next get a buffer. */ skb = skb_peek(&sk->sk_receive_queue); if (skb) { offset = *seq; goto found_ok_skb; } /* Well, if we have backlog, try to process it now yet. */ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail)) break; if (copied) { if (sk->sk_err || sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo || (flags & MSG_PEEK)) break; } else { if (sock_flag(sk, SOCK_DONE)) break; if (sk->sk_err) { copied = sock_error(sk); break; } if (sk->sk_shutdown & RCV_SHUTDOWN) break; if (sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_CLOSE) { if (!sock_flag(sk, SOCK_DONE)) { /* * This occurs when user tries to read * from never connected socket. 
*/ copied = -ENOTCONN; break; } break; } if (!timeo) { copied = -EAGAIN; break; } } if (copied >= target) { /* Do not sleep, just process backlog. */ release_sock(sk); lock_sock(sk); } else sk_wait_data(sk, &timeo, NULL); if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", current->comm, task_pid_nr(current)); peek_seq = llc->copied_seq; } continue; found_ok_skb: skb_len = skb->len; /* Ok so how much can we use? */ used = skb->len - offset; if (len < used) used = len; if (!(flags & MSG_TRUNC)) { int rc = skb_copy_datagram_msg(skb, offset, msg, used); if (rc) { /* Exception. Bailout! */ if (!copied) copied = -EFAULT; break; } } *seq += used; copied += used; len -= used; /* For non stream protcols we get one packet per recvmsg call */ if (sk->sk_type != SOCK_STREAM) goto copy_uaddr; if (!(flags & MSG_PEEK)) { skb_unlink(skb, &sk->sk_receive_queue); kfree_skb(skb); *seq = 0; } /* Partial read */ if (used + offset < skb_len) continue; } while (len > 0); out: release_sock(sk); return copied; copy_uaddr: if (uaddr != NULL && skb != NULL) { memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr)); msg->msg_namelen = sizeof(*uaddr); } if (llc_sk(sk)->cmsg_flags) llc_cmsg_rcv(msg, skb); if (!(flags & MSG_PEEK)) { skb_unlink(skb, &sk->sk_receive_queue); kfree_skb(skb); *seq = 0; } goto out; } /** * llc_ui_sendmsg - Transmit data provided by the socket user. * @sock: Socket to transmit data from. * @msg: Various user related information. * @len: Length of data to transmit. * * Transmit data provided by the socket user. * Returns non-negative upon success, negative otherwise. */ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name); struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); int flags = msg->msg_flags; int noblock = flags & MSG_DONTWAIT; int rc = -EINVAL, copied = 0, hdrlen, hh_len; struct sk_buff *skb = NULL; struct net_device *dev; size_t size = 0; dprintk("%s: sending from %02X to %02X\n", __func__, llc->laddr.lsap, llc->daddr.lsap); lock_sock(sk); if (addr) { if (msg->msg_namelen < sizeof(*addr)) goto out; } else { if (llc_ui_addr_null(&llc->addr)) goto out; addr = &llc->addr; } /* must bind connection to sap if user hasn't done it. */ if (sock_flag(sk, SOCK_ZAPPED)) { /* bind to sap with null dev, exclusive. 
*/ rc = llc_ui_autobind(sock, addr); if (rc) goto out; } dev = llc->dev; hh_len = LL_RESERVED_SPACE(dev); hdrlen = llc_ui_header_len(sk, addr); size = hdrlen + len; size = min_t(size_t, size, READ_ONCE(dev->mtu)); copied = size - hdrlen; rc = -EINVAL; if (copied < 0) goto out; release_sock(sk); skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc); lock_sock(sk); if (!skb) goto out; if (sock_flag(sk, SOCK_ZAPPED) || llc->dev != dev || hdrlen != llc_ui_header_len(sk, addr) || hh_len != LL_RESERVED_SPACE(dev) || size > READ_ONCE(dev->mtu)) goto out; skb->dev = dev; skb->protocol = llc_proto_type(addr->sllc_arphrd); skb_reserve(skb, hh_len + hdrlen); rc = memcpy_from_msg(skb_put(skb, copied), msg, copied); if (rc) goto out; if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) { llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac, addr->sllc_sap); skb = NULL; goto out; } if (addr->sllc_test) { llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac, addr->sllc_sap); skb = NULL; goto out; } if (addr->sllc_xid) { llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac, addr->sllc_sap); skb = NULL; goto out; } rc = -ENOPROTOOPT; if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua)) goto out; rc = llc_ui_send_data(sk, skb, noblock); skb = NULL; out: kfree_skb(skb); if (rc) dprintk("%s: failed sending from %02X to %02X: %d\n", __func__, llc->laddr.lsap, llc->daddr.lsap, rc); release_sock(sk); return rc ? : copied; } /** * llc_ui_getname - return the address info of a socket * @sock: Socket to get address of. * @uaddr: Address structure to return information. * @peer: Does user want local or remote address information. * * Return the address information of a socket. */ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sockaddr_llc sllc; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); int rc = -EBADF; memset(&sllc, 0, sizeof(sllc)); lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) goto out; if (peer) { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; if(llc->dev) sllc.sllc_arphrd = llc->dev->type; sllc.sllc_sap = llc->daddr.lsap; memcpy(&sllc.sllc_mac, &llc->daddr.mac, IFHWADDRLEN); } else { rc = -EINVAL; if (!llc->sap) goto out; sllc.sllc_sap = llc->sap->laddr.lsap; if (llc->dev) { sllc.sllc_arphrd = llc->dev->type; memcpy(&sllc.sllc_mac, llc->dev->dev_addr, IFHWADDRLEN); } } sllc.sllc_family = AF_LLC; memcpy(uaddr, &sllc, sizeof(sllc)); rc = sizeof(sllc); out: release_sock(sk); return rc; } /** * llc_ui_ioctl - io controls for PF_LLC * @sock: Socket to get/set info * @cmd: command * @arg: optional argument for cmd * * get/set info on llc sockets */ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { return -ENOIOCTLCMD; } /** * llc_ui_setsockopt - set various connection specific parameters. * @sock: Socket to set options on. * @level: Socket level user is requesting operations on. * @optname: Operation name. * @optval: User provided operation data. * @optlen: Length of optval. * * Set various connection specific parameters. 
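 *
 * Every option is an int at the SOL_LLC socket level; an illustrative
 * userspace call (the LLC_OPT_* constants come from the linux/llc.h UAPI
 * header):
 *
 *	int retry = 5;
 *
 *	setsockopt(fd, SOL_LLC, LLC_OPT_RETRY, &retry, sizeof(retry));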
*/ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); unsigned int opt; int rc = -EINVAL; lock_sock(sk); if (unlikely(level != SOL_LLC || optlen != sizeof(int))) goto out; rc = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (rc) goto out; rc = -EINVAL; switch (optname) { case LLC_OPT_RETRY: if (opt > LLC_OPT_MAX_RETRY) goto out; llc->n2 = opt; break; case LLC_OPT_SIZE: if (opt > LLC_OPT_MAX_SIZE) goto out; llc->n1 = opt; break; case LLC_OPT_ACK_TMR_EXP: if (opt > LLC_OPT_MAX_ACK_TMR_EXP) goto out; llc->ack_timer.expire = opt * HZ; break; case LLC_OPT_P_TMR_EXP: if (opt > LLC_OPT_MAX_P_TMR_EXP) goto out; llc->pf_cycle_timer.expire = opt * HZ; break; case LLC_OPT_REJ_TMR_EXP: if (opt > LLC_OPT_MAX_REJ_TMR_EXP) goto out; llc->rej_sent_timer.expire = opt * HZ; break; case LLC_OPT_BUSY_TMR_EXP: if (opt > LLC_OPT_MAX_BUSY_TMR_EXP) goto out; llc->busy_state_timer.expire = opt * HZ; break; case LLC_OPT_TX_WIN: if (opt > LLC_OPT_MAX_WIN) goto out; llc->k = opt; break; case LLC_OPT_RX_WIN: if (opt > LLC_OPT_MAX_WIN) goto out; llc->rw = opt; break; case LLC_OPT_PKTINFO: if (opt) llc->cmsg_flags |= LLC_CMSG_PKTINFO; else llc->cmsg_flags &= ~LLC_CMSG_PKTINFO; break; default: rc = -ENOPROTOOPT; goto out; } rc = 0; out: release_sock(sk); return rc; } /** * llc_ui_getsockopt - get connection specific socket info * @sock: Socket to get information from. * @level: Socket level user is requesting operations on. * @optname: Operation name. * @optval: Variable to return operation data in. * @optlen: Length of optval. * * Get connection specific socket information. */ static int llc_ui_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); int val = 0, len = 0, rc = -EINVAL; lock_sock(sk); if (unlikely(level != SOL_LLC)) goto out; rc = get_user(len, optlen); if (rc) goto out; rc = -EINVAL; if (len != sizeof(int)) goto out; switch (optname) { case LLC_OPT_RETRY: val = llc->n2; break; case LLC_OPT_SIZE: val = llc->n1; break; case LLC_OPT_ACK_TMR_EXP: val = llc->ack_timer.expire / HZ; break; case LLC_OPT_P_TMR_EXP: val = llc->pf_cycle_timer.expire / HZ; break; case LLC_OPT_REJ_TMR_EXP: val = llc->rej_sent_timer.expire / HZ; break; case LLC_OPT_BUSY_TMR_EXP: val = llc->busy_state_timer.expire / HZ; break; case LLC_OPT_TX_WIN: val = llc->k; break; case LLC_OPT_RX_WIN: val = llc->rw; break; case LLC_OPT_PKTINFO: val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0; break; default: rc = -ENOPROTOOPT; goto out; } rc = 0; if (put_user(len, optlen) || copy_to_user(optval, &val, len)) rc = -EFAULT; out: release_sock(sk); return rc; } static const struct net_proto_family llc_ui_family_ops = { .family = PF_LLC, .create = llc_ui_create, .owner = THIS_MODULE, }; static const struct proto_ops llc_ui_ops = { .family = PF_LLC, .owner = THIS_MODULE, .release = llc_ui_release, .bind = llc_ui_bind, .connect = llc_ui_connect, .socketpair = sock_no_socketpair, .accept = llc_ui_accept, .getname = llc_ui_getname, .poll = datagram_poll, .ioctl = llc_ui_ioctl, .listen = llc_ui_listen, .shutdown = llc_ui_shutdown, .setsockopt = llc_ui_setsockopt, .getsockopt = llc_ui_getsockopt, .sendmsg = llc_ui_sendmsg, .recvmsg = llc_ui_recvmsg, .mmap = sock_no_mmap, }; static const char llc_proc_err_msg[] __initconst = KERN_CRIT "LLC: Unable to register the proc_fs entries\n"; static const char 
llc_sysctl_err_msg[] __initconst = KERN_CRIT "LLC: Unable to register the sysctl entries\n"; static const char llc_sock_err_msg[] __initconst = KERN_CRIT "LLC: Unable to register the network family\n"; static int __init llc2_init(void) { int rc = proto_register(&llc_proto, 0); if (rc != 0) goto out; llc_build_offset_table(); llc_station_init(); llc_ui_sap_last_autoport = LLC_SAP_DYN_START; rc = llc_proc_init(); if (rc != 0) { printk(llc_proc_err_msg); goto out_station; } rc = llc_sysctl_init(); if (rc) { printk(llc_sysctl_err_msg); goto out_proc; } rc = sock_register(&llc_ui_family_ops); if (rc) { printk(llc_sock_err_msg); goto out_sysctl; } llc_add_pack(LLC_DEST_SAP, llc_sap_handler); llc_add_pack(LLC_DEST_CONN, llc_conn_handler); out: return rc; out_sysctl: llc_sysctl_exit(); out_proc: llc_proc_exit(); out_station: llc_station_exit(); proto_unregister(&llc_proto); goto out; } static void __exit llc2_exit(void) { llc_station_exit(); llc_remove_pack(LLC_DEST_SAP); llc_remove_pack(LLC_DEST_CONN); sock_unregister(PF_LLC); llc_proc_exit(); llc_sysctl_exit(); proto_unregister(&llc_proto); } module_init(llc2_init); module_exit(llc2_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Procom 1997, Jay Schullist 2001, Arnaldo C. Melo 2001-2003"); MODULE_DESCRIPTION("IEEE 802.2 PF_LLC support"); MODULE_ALIAS_NETPROTO(PF_LLC);
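The SOL_LLC option handling in llc_ui_setsockopt()/llc_ui_getsockopt() above is easiest to see from the user side. A minimal, hedged userspace sketch follows; the retry value is illustrative, and SOL_LLC is defined locally in case the libc headers do not export it (it matches the kernel's socket-level constant):

/* Hypothetical userspace sketch: open a PF_LLC datagram socket and tune the
 * retry count handled by the LLC_OPT_RETRY case in llc_ui_setsockopt(). */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/llc.h>		/* LLC_OPT_RETRY, struct sockaddr_llc */

#ifndef SOL_LLC
#define SOL_LLC 268		/* kernel's socket-level constant for LLC */
#endif

int main(void)
{
	int retries = 3;
	int fd = socket(PF_LLC, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket(PF_LLC)");	/* requires the llc2 module */
		return 1;
	}
	/* stored into llc->n2 by llc_ui_setsockopt() */
	if (setsockopt(fd, SOL_LLC, LLC_OPT_RETRY, &retries, sizeof(retries)) < 0)
		perror("setsockopt(LLC_OPT_RETRY)");
	return 0;
}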
1 97 1 97 1 4 4 1 1 1 2 215 168 96 97 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 // SPDX-License-Identifier: GPL-2.0 /* * lib/minmax.c: windowed min/max tracker * * Kathleen Nichols' algorithm for tracking the minimum (or maximum) * value of a data stream over some fixed time interval. (E.g., * the minimum RTT over the past five minutes.) It uses constant * space and constant time per update yet almost always delivers * the same minimum as an implementation that has to keep all the * data in the window. * * The algorithm keeps track of the best, 2nd best & 3rd best min * values, maintaining an invariant that the measurement time of * the n'th best >= n-1'th best. It also makes sure that the three * values are widely separated in the time window since that bounds * the worse case error when that data is monotonically increasing * over the window. * * Upon getting a new min, we can forget everything earlier because * it has no value - the new min is <= everything else in the window * by definition and it's the most recent. So we restart fresh on * every new min and overwrites 2nd & 3rd choices. The same property * holds for 2nd & 3rd best. */ #include <linux/module.h> #include <linux/win_minmax.h> /* As time advances, update the 1st, 2nd, and 3rd choices. */ static u32 minmax_subwin_update(struct minmax *m, u32 win, const struct minmax_sample *val) { u32 dt = val->t - m->s[0].t; if (unlikely(dt > win)) { /* * Passed entire window without a new val so make 2nd * choice the new val & 3rd choice the new 2nd choice. * we may have to iterate this since our 2nd choice * may also be outside the window (we checked on entry * that the third choice was in the window). */ m->s[0] = m->s[1]; m->s[1] = m->s[2]; m->s[2] = *val; if (unlikely(val->t - m->s[0].t > win)) { m->s[0] = m->s[1]; m->s[1] = m->s[2]; m->s[2] = *val; } } else if (unlikely(m->s[1].t == m->s[0].t) && dt > win/4) { /* * We've passed a quarter of the window without a new val * so take a 2nd choice from the 2nd quarter of the window. */ m->s[2] = m->s[1] = *val; } else if (unlikely(m->s[2].t == m->s[1].t) && dt > win/2) { /* * We've passed half the window without finding a new val * so take a 3rd choice from the last half of the window */ m->s[2] = *val; } return m->s[0].v; } /* Check if new measurement updates the 1st, 2nd or 3rd choice max. */ u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas) { struct minmax_sample val = { .t = t, .v = meas }; if (unlikely(val.v >= m->s[0].v) || /* found new max? */ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? */ return minmax_reset(m, t, meas); /* forget earlier samples */ if (unlikely(val.v >= m->s[1].v)) m->s[2] = m->s[1] = val; else if (unlikely(val.v >= m->s[2].v)) m->s[2] = val; return minmax_subwin_update(m, win, &val); } EXPORT_SYMBOL(minmax_running_max); /* Check if new measurement updates the 1st, 2nd or 3rd choice min. */ u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas) { struct minmax_sample val = { .t = t, .v = meas }; if (unlikely(val.v <= m->s[0].v) || /* found new min? */ unlikely(val.t - m->s[2].t > win)) /* nothing left in window? 
 */
		return minmax_reset(m, t, meas);  /* forget earlier samples */

	if (unlikely(val.v <= m->s[1].v))
		m->s[2] = m->s[1] = val;
	else if (unlikely(val.v <= m->s[2].v))
		m->s[2] = val;

	return minmax_subwin_update(m, win, &val);
}
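A short sketch of how a caller is expected to drive this tracker, for example keeping a windowed minimum RTT. Only the API declared in <linux/win_minmax.h> (minmax_reset(), minmax_running_min(), minmax_get()) is assumed; the wrapper struct, function names, and 10-second window are illustrative:

/* Illustrative kernel-context use of the win_minmax API above. */
#include <linux/jiffies.h>
#include <linux/win_minmax.h>

struct rtt_tracker {
	struct minmax min_rtt;	/* windowed minimum of RTT samples, in usec */
};

static void rtt_tracker_init(struct rtt_tracker *rt, u32 first_rtt_us)
{
	/* seed the window with the first sample */
	minmax_reset(&rt->min_rtt, jiffies, first_rtt_us);
}

static u32 rtt_tracker_update(struct rtt_tracker *rt, u32 rtt_us)
{
	u32 win = 10 * HZ;	/* illustrative 10 s window */

	/* returns the current windowed minimum after folding in rtt_us */
	return minmax_running_min(&rt->min_rtt, win, jiffies, rtt_us);
}

static u32 rtt_tracker_current_min(const struct rtt_tracker *rt)
{
	return minmax_get(&rt->min_rtt);
}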
1 1 3 3 3 3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 // SPDX-License-Identifier: GPL-2.0-or-later /* * v4l2-spi - SPI helpers for Video4Linux2 */ #include <linux/module.h> #include <linux/spi/spi.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> void v4l2_spi_subdev_unregister(struct v4l2_subdev *sd) { struct spi_device *spi = v4l2_get_subdevdata(sd); if (spi && !spi->dev.of_node && !spi->dev.fwnode) spi_unregister_device(spi); } void v4l2_spi_subdev_init(struct v4l2_subdev *sd, struct spi_device *spi, const struct v4l2_subdev_ops *ops) { v4l2_subdev_init(sd, ops); sd->flags |= V4L2_SUBDEV_FL_IS_SPI; /* the owner is the same as the spi_device's driver owner */ sd->owner = spi->dev.driver->owner; sd->dev = &spi->dev; /* spi_device and v4l2_subdev point to one another */ v4l2_set_subdevdata(sd, spi); spi_set_drvdata(spi, sd); /* initialize name */ snprintf(sd->name, sizeof(sd->name), "%s %s", spi->dev.driver->name, dev_name(&spi->dev)); } EXPORT_SYMBOL_GPL(v4l2_spi_subdev_init); struct v4l2_subdev *v4l2_spi_new_subdev(struct v4l2_device *v4l2_dev, struct spi_controller *ctlr, struct spi_board_info *info) { struct v4l2_subdev *sd = NULL; struct spi_device *spi = NULL; if (!v4l2_dev) return NULL; if (info->modalias[0]) request_module(info->modalias); spi = spi_new_device(ctlr, info); if (!spi || !spi->dev.driver) goto error; if (!try_module_get(spi->dev.driver->owner)) goto error; sd = spi_get_drvdata(spi); /* * Register with the v4l2_device which increases the module's * use count as well. */ if (__v4l2_device_register_subdev(v4l2_dev, sd, sd->owner)) sd = NULL; /* Decrease the module use count to match the first try_module_get. */ module_put(spi->dev.driver->owner); error: /* * If we have a client but no subdev, then something went wrong and * we must unregister the client. */ if (!sd) spi_unregister_device(spi); return sd; } EXPORT_SYMBOL_GPL(v4l2_spi_new_subdev);
3 3 3 80 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 // SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * "Ping" sockets * * Based on ipv4/ping.c code. * * Authors: Lorenzo Colitti (IPv6 support) * Vasiliy Kulikov / Openwall (IPv4 implementation, for Linux 2.6), * Pavel Kankovsky (IPv4 implementation, for Linux 2.4.32) */ #include <net/addrconf.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/protocol.h> #include <net/udp.h> #include <net/transp_v6.h> #include <linux/proc_fs.h> #include <linux/bpf-cgroup.h> #include <net/ping.h> /* Compatibility glue so we can support IPv6 when it's compiled as a module */ static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) { return -EAFNOSUPPORT; } static void dummy_ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { } static int dummy_icmpv6_err_convert(u8 type, u8 code, int *err) { return -EAFNOSUPPORT; } static void dummy_ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) {} static int dummy_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict) { return 0; } static int ping_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { /* This check is replicated from __ip6_datagram_connect() and * intended to prevent BPF program called below from accessing * bytes that are out of the bound specified by user in addr_len. 
*/ if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; return BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, &addr_len); } static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct icmp6hdr user_icmph; int addr_type; struct in6_addr *daddr; int oif = 0; struct flowi6 fl6; int err; struct dst_entry *dst; struct rt6_info *rt; struct pingfakehdr pfh; struct ipcm6_cookie ipc6; err = ping_common_sendmsg(AF_INET6, msg, len, &user_icmph, sizeof(user_icmph)); if (err) return err; memset(&fl6, 0, sizeof(fl6)); if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name); if (msg->msg_namelen < sizeof(*u)) return -EINVAL; if (u->sin6_family != AF_INET6) { return -EAFNOSUPPORT; } daddr = &(u->sin6_addr); if (inet6_test_bit(SNDFLOW, sk)) fl6.flowlabel = u->sin6_flowinfo & IPV6_FLOWINFO_MASK; if (__ipv6_addr_needs_scope_id(ipv6_addr_type(daddr))) oif = u->sin6_scope_id; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = &sk->sk_v6_daddr; fl6.flowlabel = np->flow_label; } if (!oif) oif = sk->sk_bound_dev_if; if (!oif) oif = np->sticky_pktinfo.ipi6_ifindex; if (!oif && ipv6_addr_is_multicast(daddr)) oif = READ_ONCE(np->mcast_oif); else if (!oif) oif = READ_ONCE(np->ucast_oif); addr_type = ipv6_addr_type(daddr); if ((__ipv6_addr_needs_scope_id(addr_type) && !oif) || (addr_type & IPV6_ADDR_MAPPED) || (oif && sk->sk_bound_dev_if && oif != sk->sk_bound_dev_if && l3mdev_master_ifindex_by_index(sock_net(sk), oif) != sk->sk_bound_dev_if)) return -EINVAL; ipcm6_init_sk(&ipc6, sk); ipc6.sockc.tsflags = READ_ONCE(sk->sk_tsflags); ipc6.sockc.mark = READ_ONCE(sk->sk_mark); fl6.flowi6_oif = oif; if (msg->msg_controllen) { struct ipv6_txoptions opt = {}; opt.tot_len = sizeof(opt); ipc6.opt = &opt; err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6); if (err < 0) return err; /* Changes to txoptions and flow info are not implemented, yet. * Drop the options. 
*/ ipc6.opt = NULL; } fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.saddr = np->saddr; fl6.daddr = *daddr; fl6.flowi6_mark = ipc6.sockc.mark; fl6.flowi6_uid = sk->sk_uid; fl6.fl6_icmp_type = user_icmph.icmp6_type; fl6.fl6_icmp_code = user_icmph.icmp6_code; security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_sk_dst_lookup_flow(sk, &fl6, daddr, false); if (IS_ERR(dst)) return PTR_ERR(dst); rt = dst_rt6_info(dst); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = READ_ONCE(np->mcast_oif); else if (!fl6.flowi6_oif) fl6.flowi6_oif = READ_ONCE(np->ucast_oif); pfh.icmph.type = user_icmph.icmp6_type; pfh.icmph.code = user_icmph.icmp6_code; pfh.icmph.checksum = 0; pfh.icmph.un.echo.id = inet->inet_sport; pfh.icmph.un.echo.sequence = user_icmph.icmp6_sequence; pfh.msg = msg; pfh.wcheck = 0; pfh.family = AF_INET6; if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); lock_sock(sk); err = ip6_append_data(sk, ping_getfrag, &pfh, len, sizeof(struct icmp6hdr), &ipc6, &fl6, rt, MSG_DONTWAIT); if (err) { ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); } else { icmpv6_push_pending_frames(sk, &fl6, (struct icmp6hdr *)&pfh.icmph, len); } release_sock(sk); dst_release(dst); if (err) return err; return len; } struct proto pingv6_prot = { .name = "PINGv6", .owner = THIS_MODULE, .init = ping_init_sock, .close = ping_close, .pre_connect = ping_v6_pre_connect, .connect = ip6_datagram_connect_v6_only, .disconnect = __udp_disconnect, .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .sendmsg = ping_v6_sendmsg, .recvmsg = ping_recvmsg, .bind = ping_bind, .backlog_rcv = ping_queue_rcv_skb, .hash = ping_hash, .unhash = ping_unhash, .get_port = ping_get_port, .put_port = ping_unhash, .obj_size = sizeof(struct raw6_sock), .ipv6_pinfo_offset = offsetof(struct raw6_sock, inet6), }; EXPORT_SYMBOL_GPL(pingv6_prot); static struct inet_protosw pingv6_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_ICMPV6, .prot = &pingv6_prot, .ops = &inet6_sockraw_ops, .flags = INET_PROTOSW_REUSE, }; #ifdef CONFIG_PROC_FS static void *ping_v6_seq_start(struct seq_file *seq, loff_t *pos) { return ping_seq_start(seq, pos, AF_INET6); } static int ping_v6_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); } else { int bucket = ((struct ping_iter_state *) seq->private)->bucket; struct inet_sock *inet = inet_sk((struct sock *)v); __u16 srcp = ntohs(inet->inet_sport); __u16 destp = ntohs(inet->inet_dport); ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket); } return 0; } static const struct seq_operations ping_v6_seq_ops = { .start = ping_v6_seq_start, .show = ping_v6_seq_show, .next = ping_seq_next, .stop = ping_seq_stop, }; static int __net_init ping_v6_proc_init_net(struct net *net) { if (!proc_create_net("icmp6", 0444, net->proc_net, &ping_v6_seq_ops, sizeof(struct ping_iter_state))) return -ENOMEM; return 0; } static void __net_exit ping_v6_proc_exit_net(struct net *net) { remove_proc_entry("icmp6", net->proc_net); } static struct pernet_operations ping_v6_net_ops = { .init = ping_v6_proc_init_net, .exit = ping_v6_proc_exit_net, }; #endif int __init pingv6_init(void) { #ifdef CONFIG_PROC_FS int ret = register_pernet_subsys(&ping_v6_net_ops); if (ret) return ret; #endif pingv6_ops.ipv6_recv_error = ipv6_recv_error; pingv6_ops.ip6_datagram_recv_common_ctl = ip6_datagram_recv_common_ctl; 
pingv6_ops.ip6_datagram_recv_specific_ctl = ip6_datagram_recv_specific_ctl; pingv6_ops.icmpv6_err_convert = icmpv6_err_convert; pingv6_ops.ipv6_icmp_error = ipv6_icmp_error; pingv6_ops.ipv6_chk_addr = ipv6_chk_addr; return inet6_register_protosw(&pingv6_protosw); } /* This never gets called because it's not possible to unload the ipv6 module, * but just in case. */ void pingv6_exit(void) { pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error; pingv6_ops.ip6_datagram_recv_common_ctl = dummy_ip6_datagram_recv_ctl; pingv6_ops.ip6_datagram_recv_specific_ctl = dummy_ip6_datagram_recv_ctl; pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert; pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error; pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr; #ifdef CONFIG_PROC_FS unregister_pernet_subsys(&ping_v6_net_ops); #endif inet6_unregister_protosw(&pingv6_protosw); }
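What this protocol glue ultimately serves is the unprivileged ICMPv6 "ping socket". A hedged userspace sketch; the destination address is illustrative, and the kernel only permits such sockets for groups listed in net.ipv4.ping_group_range:

/* Userspace sketch: send one ICMPv6 echo request through a ping socket,
 * which ends up in ping_v6_sendmsg() above. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <arpa/inet.h>

int main(void)
{
	struct sockaddr_in6 dst = { .sin6_family = AF_INET6 };
	struct icmp6_hdr echo = {
		.icmp6_type = ICMP6_ECHO_REQUEST,
		/* the id is overwritten with the socket's local port and the
		 * checksum is filled in by the ICMPv6 stack */
	};
	int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_ICMPV6);

	if (fd < 0) {
		perror("ping socket");	/* check net.ipv4.ping_group_range */
		return 1;
	}
	inet_pton(AF_INET6, "::1", &dst.sin6_addr);	/* illustrative target */
	if (sendto(fd, &echo, sizeof(echo), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");
	return 0;
}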
15 15 15 32 32 32 17 22 1 21 348 348 352 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 // SPDX-License-Identifier: GPL-2.0-only #include <linux/export.h> #include <linux/sched/signal.h> #include <linux/sched/task.h> #include <linux/fs.h> #include <linux/path.h> #include <linux/slab.h> #include <linux/fs_struct.h> #include "internal.h" /* * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. * It can block. */ void set_fs_root(struct fs_struct *fs, const struct path *path) { struct path old_root; path_get(path); spin_lock(&fs->lock); write_seqcount_begin(&fs->seq); old_root = fs->root; fs->root = *path; write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); if (old_root.dentry) path_put(&old_root); } /* * Replace the fs->{pwdmnt,pwd} with {mnt,dentry}. Put the old values. * It can block. */ void set_fs_pwd(struct fs_struct *fs, const struct path *path) { struct path old_pwd; path_get(path); spin_lock(&fs->lock); write_seqcount_begin(&fs->seq); old_pwd = fs->pwd; fs->pwd = *path; write_seqcount_end(&fs->seq); spin_unlock(&fs->lock); if (old_pwd.dentry) path_put(&old_pwd); } static inline int replace_path(struct path *p, const struct path *old, const struct path *new) { if (likely(p->dentry != old->dentry || p->mnt != old->mnt)) return 0; *p = *new; return 1; } void chroot_fs_refs(const struct path *old_root, const struct path *new_root) { struct task_struct *g, *p; struct fs_struct *fs; int count = 0; read_lock(&tasklist_lock); for_each_process_thread(g, p) { task_lock(p); fs = p->fs; if (fs) { int hits = 0; spin_lock(&fs->lock); write_seqcount_begin(&fs->seq); hits += replace_path(&fs->root, old_root, new_root); hits += replace_path(&fs->pwd, old_root, new_root); write_seqcount_end(&fs->seq); while (hits--) { count++; path_get(new_root); } spin_unlock(&fs->lock); } task_unlock(p); } read_unlock(&tasklist_lock); while (count--) path_put(old_root); } void free_fs_struct(struct fs_struct *fs) { path_put(&fs->root); path_put(&fs->pwd); kmem_cache_free(fs_cachep, fs); } void exit_fs(struct task_struct *tsk) { struct fs_struct *fs = tsk->fs; if (fs) { int kill; task_lock(tsk); spin_lock(&fs->lock); tsk->fs = NULL; kill = !--fs->users; spin_unlock(&fs->lock); task_unlock(tsk); if (kill) free_fs_struct(fs); } } struct fs_struct *copy_fs_struct(struct fs_struct *old) { struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); /* We don't need to lock fs - think why ;-) */ if (fs) { fs->users = 1; fs->in_exec = 0; spin_lock_init(&fs->lock); seqcount_spinlock_init(&fs->seq, &fs->lock); fs->umask = old->umask; spin_lock(&old->lock); fs->root = old->root; path_get(&fs->root); fs->pwd = old->pwd; path_get(&fs->pwd); spin_unlock(&old->lock); } return fs; } int unshare_fs_struct(void) { struct fs_struct *fs = current->fs; struct fs_struct *new_fs = copy_fs_struct(fs); int kill; if (!new_fs) return -ENOMEM; task_lock(current); spin_lock(&fs->lock); kill = !--fs->users; current->fs = new_fs; spin_unlock(&fs->lock); task_unlock(current); if (kill) free_fs_struct(fs); return 0; } 
EXPORT_SYMBOL_GPL(unshare_fs_struct); int current_umask(void) { return current->fs->umask; } EXPORT_SYMBOL(current_umask); /* to be mentioned only in INIT_TASK */ struct fs_struct init_fs = { .users = 1, .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), .seq = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock), .umask = 0022, };
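copy_fs_struct() above is what ends up servicing unshare(2) with CLONE_FS when the fs_struct is shared. A small, hedged userspace illustration of the effect; only the standard unshare()/chdir() calls are used and the target directory is arbitrary:

/* Userspace sketch: give the calling thread a private fs_struct via
 * unshare(CLONE_FS), which copies it with copy_fs_struct() when shared. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_FS) < 0) {
		perror("unshare(CLONE_FS)");
		return 1;
	}
	/* In a multithreaded program, chdir()/chroot()/umask() from this
	 * thread no longer affect threads still sharing the old fs_struct. */
	if (chdir("/tmp") < 0)
		perror("chdir");
	return 0;
}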
2 149 283 11 101 16 127 466 106 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __SOCK_DIAG_H__ #define __SOCK_DIAG_H__ #include <linux/netlink.h> #include <linux/user_namespace.h> #include <net/net_namespace.h> #include <net/sock.h> #include <uapi/linux/sock_diag.h> struct sk_buff; struct nlmsghdr; struct sock; struct sock_diag_handler { struct module *owner; __u8 family; int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); int (*get_info)(struct sk_buff *skb, struct sock *sk); int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh); }; int sock_diag_register(const struct sock_diag_handler *h); void sock_diag_unregister(const struct sock_diag_handler *h); struct sock_diag_inet_compat { struct module *owner; int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh); }; void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr); void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr); u64 __sock_gen_cookie(struct sock *sk); static inline u64 sock_gen_cookie(struct sock *sk) { u64 cookie; preempt_disable(); cookie = __sock_gen_cookie(sk); preempt_enable(); return cookie; } int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie); void sock_diag_save_cookie(struct sock *sk, __u32 *cookie); int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr); int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, struct sk_buff *skb, int attrtype); static inline enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk) { switch (sk->sk_family) { case AF_INET: if (sk->sk_type == SOCK_RAW) return SKNLGRP_NONE; switch (sk->sk_protocol) { case IPPROTO_TCP: return SKNLGRP_INET_TCP_DESTROY; case IPPROTO_UDP: return SKNLGRP_INET_UDP_DESTROY; default: return SKNLGRP_NONE; } case AF_INET6: if (sk->sk_type == SOCK_RAW) return SKNLGRP_NONE; switch (sk->sk_protocol) { case IPPROTO_TCP: return SKNLGRP_INET6_TCP_DESTROY; case IPPROTO_UDP: return SKNLGRP_INET6_UDP_DESTROY; default: return SKNLGRP_NONE; } default: return SKNLGRP_NONE; } } static inline bool sock_diag_has_destroy_listeners(const struct sock *sk) { const struct net *n = sock_net(sk); const enum sknetlink_groups group = sock_diag_destroy_group(sk); return group != SKNLGRP_NONE && n->diag_nlsk && netlink_has_listeners(n->diag_nlsk, group); } void sock_diag_broadcast_destroy(struct sock *sk); int sock_diag_destroy(struct sock *sk, int err); #endif
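A minimal sketch of how a protocol module would plug into this interface, using only the fields and functions declared in the header above; the handler name, the choice of AF_LLC, and the empty dump body are hypothetical:

/* Hypothetical sock_diag handler registration built from the declarations
 * in this header. */
#include <linux/module.h>
#include <linux/sock_diag.h>

static int demo_diag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* parse the request in nlh and dump matching sockets into skb */
	return 0;
}

static const struct sock_diag_handler demo_diag_handler = {
	.owner	= THIS_MODULE,
	.family	= AF_LLC,		/* illustrative address family */
	.dump	= demo_diag_dump,
};

static int __init demo_diag_init(void)
{
	return sock_diag_register(&demo_diag_handler);
}

static void __exit demo_diag_exit(void)
{
	sock_diag_unregister(&demo_diag_handler);
}

module_init(demo_diag_init);
module_exit(demo_diag_exit);
MODULE_DESCRIPTION("illustrative sock_diag handler skeleton");
MODULE_LICENSE("GPL");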
71 62 62 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 // SPDX-License-Identifier: GPL-2.0 #ifndef __KVM_X86_MMU_TDP_MMU_H #define __KVM_X86_MMU_TDP_MMU_H #include <linux/kvm_host.h> #include "spte.h" void kvm_mmu_init_tdp_mmu(struct kvm *kvm); void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm); int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu); __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root) { return refcount_inc_not_zero(&root->tdp_mmu_root_count); } void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root); bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush); bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp); void kvm_tdp_mmu_zap_all(struct kvm *kvm); void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm); void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm); int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault); bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush); bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, const struct kvm_memory_slot *slot, int min_level); void kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, const struct kvm_memory_slot *slot); void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot); void kvm_tdp_mmu_recover_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot); bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int min_level); void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t start, gfn_t end, int target_level, bool shared); static inline void kvm_tdp_mmu_walk_lockless_begin(void) { rcu_read_lock(); } static inline void kvm_tdp_mmu_walk_lockless_end(void) { rcu_read_unlock(); } int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level); u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn, u64 *spte); #ifdef CONFIG_X86_64 static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; } #else static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; } #endif #endif /* __KVM_X86_MMU_TDP_MMU_H */
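The root refcount helpers above follow the usual try-get/put pattern. A hedged sketch of how a walker is expected to pair them, assuming it sits alongside the TDP MMU code; the helper itself is hypothetical, and real callers normally go through the root iterators in tdp_mmu.c:

/* Illustrative pairing of the TDP MMU root refcount helpers declared above. */
static void demo_visit_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
	/* skip roots whose refcount already hit zero (being torn down) */
	if (!kvm_tdp_mmu_get_root(root))
		return;

	/* ... inspect or modify the paging structure under this root ... */

	kvm_tdp_mmu_put_root(kvm, root);
}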
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 /* SPDX-License-Identifier: GPL-2.0-only */ /* * AppArmor security module * * This file contains AppArmor auditing function definitions. * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. */ #ifndef __AA_AUDIT_H #define __AA_AUDIT_H #include <linux/audit.h> #include <linux/fs.h> #include <linux/lsm_audit.h> #include <linux/sched.h> #include <linux/slab.h> #include "file.h" #include "label.h" extern const char *const audit_mode_names[]; #define AUDIT_MAX_INDEX 5 enum audit_mode { AUDIT_NORMAL, /* follow normal auditing of accesses */ AUDIT_QUIET_DENIED, /* quiet all denied access messages */ AUDIT_QUIET, /* quiet all messages */ AUDIT_NOQUIET, /* do not quiet audit messages */ AUDIT_ALL /* audit all accesses */ }; enum audit_type { AUDIT_APPARMOR_AUDIT, AUDIT_APPARMOR_ALLOWED, AUDIT_APPARMOR_DENIED, AUDIT_APPARMOR_HINT, AUDIT_APPARMOR_STATUS, AUDIT_APPARMOR_ERROR, AUDIT_APPARMOR_KILL, AUDIT_APPARMOR_AUTO }; #define OP_NULL NULL #define OP_SYSCTL "sysctl" #define OP_CAPABLE "capable" #define OP_UNLINK "unlink" #define OP_MKDIR "mkdir" #define OP_RMDIR "rmdir" #define OP_MKNOD "mknod" #define OP_TRUNC "truncate" #define OP_LINK "link" #define OP_SYMLINK "symlink" #define OP_RENAME_SRC "rename_src" #define OP_RENAME_DEST "rename_dest" #define OP_CHMOD "chmod" #define OP_CHOWN "chown" #define OP_GETATTR "getattr" #define OP_OPEN "open" #define OP_FRECEIVE "file_receive" #define OP_FPERM "file_perm" #define OP_FLOCK "file_lock" #define OP_FMMAP "file_mmap" #define OP_FMPROT "file_mprotect" #define OP_INHERIT "file_inherit" #define OP_PIVOTROOT "pivotroot" #define OP_MOUNT "mount" #define OP_UMOUNT "umount" #define OP_CREATE "create" #define OP_POST_CREATE "post_create" #define OP_BIND "bind" #define OP_CONNECT "connect" #define OP_LISTEN "listen" #define OP_ACCEPT "accept" #define OP_SENDMSG "sendmsg" #define OP_RECVMSG "recvmsg" #define OP_GETSOCKNAME "getsockname" #define OP_GETPEERNAME "getpeername" #define OP_GETSOCKOPT "getsockopt" #define OP_SETSOCKOPT "setsockopt" #define OP_SHUTDOWN "socket_shutdown" #define OP_PTRACE "ptrace" #define OP_SIGNAL "signal" #define OP_EXEC "exec" #define OP_CHANGE_HAT "change_hat" #define OP_CHANGE_PROFILE "change_profile" #define OP_CHANGE_ONEXEC "change_onexec" #define OP_STACK "stack" #define OP_STACK_ONEXEC "stack_onexec" #define OP_SETPROCATTR "setprocattr" #define OP_SETRLIMIT "setrlimit" #define OP_PROF_REPL "profile_replace" #define OP_PROF_LOAD "profile_load" #define OP_PROF_RM "profile_remove" #define OP_USERNS_CREATE "userns_create" #define OP_URING_OVERRIDE "uring_override" #define OP_URING_SQPOLL "uring_sqpoll" struct apparmor_audit_data { int error; int type; u16 class; const char *op; const struct cred *subj_cred; struct aa_label *subj_label; const char *name; const char *info; u32 request; u32 denied; union { 
/* these entries require a custom callback fn */ struct { struct aa_label *peer; union { struct { const char *target; kuid_t ouid; } fs; struct { int rlim; unsigned long max; } rlim; struct { int signal; int unmappedsig; }; struct { int type, protocol; struct sock *peer_sk; void *addr; int addrlen; } net; }; }; struct { struct aa_profile *profile; const char *ns; long pos; } iface; struct { const char *src_name; const char *type; const char *trans; const char *data; unsigned long flags; } mnt; struct { struct aa_label *target; } uring; }; struct common_audit_data common; }; /* macros for dealing with apparmor_audit_data structure */ #define aad(SA) (container_of(SA, struct apparmor_audit_data, common)) #define aad_of_va(VA) aad((struct common_audit_data *)(VA)) #define DEFINE_AUDIT_DATA(NAME, T, C, X) \ /* TODO: cleanup audit init so we don't need _aad = {0,} */ \ struct apparmor_audit_data NAME = { \ .class = (C), \ .op = (X), \ .common.type = (T), \ .common.u.tsk = NULL, \ .common.apparmor_audit_data = &NAME, \ }; void aa_audit_msg(int type, struct apparmor_audit_data *ad, void (*cb) (struct audit_buffer *, void *)); int aa_audit(int type, struct aa_profile *profile, struct apparmor_audit_data *ad, void (*cb) (struct audit_buffer *, void *)); #define aa_audit_error(ERROR, AD, CB) \ ({ \ (AD)->error = (ERROR); \ aa_audit_msg(AUDIT_APPARMOR_ERROR, (AD), (CB)); \ (AD)->error; \ }) static inline int complain_error(int error) { if (error == -EPERM || error == -EACCES) return 0; return error; } void aa_audit_rule_free(void *vrule); int aa_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule, gfp_t gfp); int aa_audit_rule_known(struct audit_krule *rule); int aa_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op, void *vrule); #endif /* __AA_AUDIT_H */
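To illustrate the DEFINE_AUDIT_DATA()/aa_audit_msg() pair above, a hedged sketch of how a mediation path might declare and emit an audit record. LSM_AUDIT_DATA_NONE and AA_CLASS_FILE are assumptions about the usual type/class constants from the LSM and AppArmor headers; OP_OPEN comes from this header, and the callback is a minimal stand-in:

/* Hypothetical use of DEFINE_AUDIT_DATA()/aa_audit_msg() from above. */
#include <linux/lsm_audit.h>
#include "include/audit.h"
#include "include/apparmor.h"

static void demo_audit_cb(struct audit_buffer *ab, void *va)
{
	/* va is the embedded common_audit_data, see aad_of_va() above */
	audit_log_format(ab, " info=\"demo\"");
}

static void demo_report_denial(int error)
{
	DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_FILE, OP_OPEN);

	ad.error = error;
	aa_audit_msg(AUDIT_APPARMOR_DENIED, &ad, demo_audit_cb);
}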
816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 // SPDX-License-Identifier: GPL-2.0-or-later /* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * * This file is part of the SCTP kernel implementation * * These functions handle output processing. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@austin.ibm.com> * Sridhar Samudrala <sri@us.ibm.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include <linux/kernel.h> #include <linux/wait.h> #include <linux/time.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/init.h> #include <linux/slab.h> #include <net/inet_ecn.h> #include <net/ip.h> #include <net/icmp.h> #include <net/net_namespace.h> #include <linux/socket.h> /* for sa_family_t */ #include <net/sock.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> #include <net/sctp/checksum.h> /* Forward declarations for private helpers. */ static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet, struct sctp_chunk *chunk); static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet, struct sctp_chunk *chunk); static void sctp_packet_append_data(struct sctp_packet *packet, struct sctp_chunk *chunk); static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet, struct sctp_chunk *chunk, u16 chunk_len); static void sctp_packet_reset(struct sctp_packet *packet) { /* sctp_packet_transmit() relies on this to reset size to the * current overhead after sending packets. */ packet->size = packet->overhead; packet->has_cookie_echo = 0; packet->has_sack = 0; packet->has_data = 0; packet->has_auth = 0; packet->ipfragok = 0; packet->auth = NULL; } /* Config a packet. * This appears to be a followup set of initializations. */ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, int ecn_capable) { struct sctp_transport *tp = packet->transport; struct sctp_association *asoc = tp->asoc; struct sctp_sock *sp = NULL; struct sock *sk; pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag); packet->vtag = vtag; /* do the following jobs only once for a flush schedule */ if (!sctp_packet_empty(packet)) return; /* set packet max_size with pathmtu, then calculate overhead */ packet->max_size = tp->pathmtu; if (asoc) { sk = asoc->base.sk; sp = sctp_sk(sk); } packet->overhead = sctp_mtu_payload(sp, 0, 0); packet->size = packet->overhead; if (!asoc) return; /* update dst or transport pathmtu if in need */ if (!sctp_transport_dst_check(tp)) { sctp_transport_route(tp, NULL, sp); if (asoc->param_flags & SPP_PMTUD_ENABLE) sctp_assoc_sync_pmtu(asoc); } else if (!sctp_transport_pl_enabled(tp) && asoc->param_flags & SPP_PMTUD_ENABLE) { if (!sctp_transport_pmtu_check(tp)) sctp_assoc_sync_pmtu(asoc); } if (asoc->pmtu_pending) { if (asoc->param_flags & SPP_PMTUD_ENABLE) sctp_assoc_sync_pmtu(asoc); asoc->pmtu_pending = 0; } /* If there a is a prepend chunk stick it on the list before * any other chunks get appended. 
*/ if (ecn_capable) { struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc); if (chunk) sctp_packet_append_chunk(packet, chunk); } if (!tp->dst) return; /* set packet max_size with gso_max_size if gso is enabled*/ rcu_read_lock(); if (__sk_dst_get(sk) != tp->dst) { dst_hold(tp->dst); sk_setup_caps(sk, tp->dst); } packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size), GSO_LEGACY_MAX_SIZE) : asoc->pathmtu; rcu_read_unlock(); } /* Initialize the packet structure. */ void sctp_packet_init(struct sctp_packet *packet, struct sctp_transport *transport, __u16 sport, __u16 dport) { pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport); packet->transport = transport; packet->source_port = sport; packet->destination_port = dport; INIT_LIST_HEAD(&packet->chunk_list); /* The overhead will be calculated by sctp_packet_config() */ packet->overhead = 0; sctp_packet_reset(packet); packet->vtag = 0; } /* Free a packet. */ void sctp_packet_free(struct sctp_packet *packet) { struct sctp_chunk *chunk, *tmp; pr_debug("%s: packet:%p\n", __func__, packet); list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { list_del_init(&chunk->list); sctp_chunk_free(chunk); } } /* This routine tries to append the chunk to the offered packet. If adding * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk * is not present in the packet, it transmits the input packet. * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long * as it can fit in the packet, but any more data that does not fit in this * packet can be sent only after receiving the COOKIE_ACK. */ enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet, struct sctp_chunk *chunk, int one_packet, gfp_t gfp) { enum sctp_xmit retval; pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__, packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1); switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { case SCTP_XMIT_PMTU_FULL: if (!packet->has_cookie_echo) { int error = 0; error = sctp_packet_transmit(packet, gfp); if (error < 0) chunk->skb->sk->sk_err = -error; /* If we have an empty packet, then we can NOT ever * return PMTU_FULL. */ if (!one_packet) retval = sctp_packet_append_chunk(packet, chunk); } break; case SCTP_XMIT_RWND_FULL: case SCTP_XMIT_OK: case SCTP_XMIT_DELAY: break; } return retval; } /* Try to bundle a pad chunk into a packet with a heartbeat chunk for PLPMTUTD probe */ static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk) { struct sctp_transport *t = pkt->transport; struct sctp_chunk *pad; int overhead = 0; if (!chunk->pmtu_probe) return SCTP_XMIT_OK; /* calculate the Padding Data size for the pad chunk */ overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk); pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead); if (!pad) return SCTP_XMIT_DELAY; list_add_tail(&pad->list, &pkt->chunk_list); pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length)); chunk->transport = t; return SCTP_XMIT_OK; } /* Try to bundle an auth chunk into the packet. 
*/ static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt, struct sctp_chunk *chunk) { struct sctp_association *asoc = pkt->transport->asoc; enum sctp_xmit retval = SCTP_XMIT_OK; struct sctp_chunk *auth; /* if we don't have an association, we can't do authentication */ if (!asoc) return retval; /* See if this is an auth chunk we are bundling or if * auth is already bundled. */ if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth) return retval; /* if the peer did not request this chunk to be authenticated, * don't do it */ if (!chunk->auth) return retval; auth = sctp_make_auth(asoc, chunk->shkey->key_id); if (!auth) return retval; auth->shkey = chunk->shkey; sctp_auth_shkey_hold(auth->shkey); retval = __sctp_packet_append_chunk(pkt, auth); if (retval != SCTP_XMIT_OK) sctp_chunk_free(auth); return retval; } /* Try to bundle a SACK with the packet. */ static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt, struct sctp_chunk *chunk) { enum sctp_xmit retval = SCTP_XMIT_OK; /* If sending DATA and haven't aleady bundled a SACK, try to * bundle one in to the packet. */ if (sctp_chunk_is_data(chunk) && !pkt->has_sack && !pkt->has_cookie_echo) { struct sctp_association *asoc; struct timer_list *timer; asoc = pkt->transport->asoc; timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; /* If the SACK timer is running, we have a pending SACK */ if (timer_pending(timer)) { struct sctp_chunk *sack; if (pkt->transport->sack_generation != pkt->transport->asoc->peer.sack_generation) return retval; asoc->a_rwnd = asoc->rwnd; sack = sctp_make_sack(asoc); if (sack) { retval = __sctp_packet_append_chunk(pkt, sack); if (retval != SCTP_XMIT_OK) { sctp_chunk_free(sack); goto out; } SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS); asoc->stats.octrlchunks++; asoc->peer.sack_needed = 0; if (del_timer(timer)) sctp_association_put(asoc); } } } out: return retval; } /* Append a chunk to the offered packet reporting back any inability to do * so. */ static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet, struct sctp_chunk *chunk) { __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length)); enum sctp_xmit retval = SCTP_XMIT_OK; /* Check to see if this chunk will fit into the packet */ retval = sctp_packet_will_fit(packet, chunk, chunk_len); if (retval != SCTP_XMIT_OK) goto finish; /* We believe that this chunk is OK to add to the packet */ switch (chunk->chunk_hdr->type) { case SCTP_CID_DATA: case SCTP_CID_I_DATA: /* Account for the data being in the packet */ sctp_packet_append_data(packet, chunk); /* Disallow SACK bundling after DATA. */ packet->has_sack = 1; /* Disallow AUTH bundling after DATA */ packet->has_auth = 1; /* Let it be knows that packet has DATA in it */ packet->has_data = 1; /* timestamp the chunk for rtx purposes */ chunk->sent_at = jiffies; /* Mainly used for prsctp RTX policy */ chunk->sent_count++; break; case SCTP_CID_COOKIE_ECHO: packet->has_cookie_echo = 1; break; case SCTP_CID_SACK: packet->has_sack = 1; if (chunk->asoc) chunk->asoc->stats.osacks++; break; case SCTP_CID_AUTH: packet->has_auth = 1; packet->auth = chunk; break; } /* It is OK to send this chunk. */ list_add_tail(&chunk->list, &packet->chunk_list); packet->size += chunk_len; chunk->transport = packet->transport; finish: return retval; } /* Append a chunk to the offered packet reporting back any inability to do * so. 
*/ enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet, struct sctp_chunk *chunk) { enum sctp_xmit retval = SCTP_XMIT_OK; pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk); /* Data chunks are special. Before seeing what else we can * bundle into this packet, check to see if we are allowed to * send this DATA. */ if (sctp_chunk_is_data(chunk)) { retval = sctp_packet_can_append_data(packet, chunk); if (retval != SCTP_XMIT_OK) goto finish; } /* Try to bundle AUTH chunk */ retval = sctp_packet_bundle_auth(packet, chunk); if (retval != SCTP_XMIT_OK) goto finish; /* Try to bundle SACK chunk */ retval = sctp_packet_bundle_sack(packet, chunk); if (retval != SCTP_XMIT_OK) goto finish; retval = __sctp_packet_append_chunk(packet, chunk); if (retval != SCTP_XMIT_OK) goto finish; retval = sctp_packet_bundle_pad(packet, chunk); finish: return retval; } static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) { if (SCTP_OUTPUT_CB(head)->last == head) skb_shinfo(head)->frag_list = skb; else SCTP_OUTPUT_CB(head)->last->next = skb; SCTP_OUTPUT_CB(head)->last = skb; head->truesize += skb->truesize; head->data_len += skb->len; head->len += skb->len; refcount_add(skb->truesize, &head->sk->sk_wmem_alloc); __skb_header_release(skb); } static int sctp_packet_pack(struct sctp_packet *packet, struct sk_buff *head, int gso, gfp_t gfp) { struct sctp_transport *tp = packet->transport; struct sctp_auth_chunk *auth = NULL; struct sctp_chunk *chunk, *tmp; int pkt_count = 0, pkt_size; struct sock *sk = head->sk; struct sk_buff *nskb; int auth_len = 0; if (gso) { skb_shinfo(head)->gso_type = sk->sk_gso_type; SCTP_OUTPUT_CB(head)->last = head; } else { nskb = head; pkt_size = packet->size; goto merge; } do { /* calculate the pkt_size and alloc nskb */ pkt_size = packet->overhead; list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { int padded = SCTP_PAD4(chunk->skb->len); if (chunk == packet->auth) auth_len = padded; else if (auth_len + padded + packet->overhead > tp->pathmtu) return 0; else if (pkt_size + padded > tp->pathmtu) break; pkt_size += padded; } nskb = alloc_skb(pkt_size + MAX_HEADER, gfp); if (!nskb) return 0; skb_reserve(nskb, packet->overhead + MAX_HEADER); merge: /* merge chunks into nskb and append nskb into head list */ pkt_size -= packet->overhead; list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { int padding; list_del_init(&chunk->list); if (sctp_chunk_is_data(chunk)) { if (!sctp_chunk_retransmitted(chunk) && !tp->rto_pending) { chunk->rtt_in_progress = 1; tp->rto_pending = 1; } } padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len; if (padding) skb_put_zero(chunk->skb, padding); if (chunk == packet->auth) auth = (struct sctp_auth_chunk *) skb_tail_pointer(nskb); skb_put_data(nskb, chunk->skb->data, chunk->skb->len); pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n", chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), chunk->has_tsn ? "TSN" : "No TSN", chunk->has_tsn ? 
ntohl(chunk->subh.data_hdr->tsn) : 0, ntohs(chunk->chunk_hdr->length), chunk->skb->len, chunk->rtt_in_progress); pkt_size -= SCTP_PAD4(chunk->skb->len); if (!sctp_chunk_is_data(chunk) && chunk != packet->auth) sctp_chunk_free(chunk); if (!pkt_size) break; } if (auth) { sctp_auth_calculate_hmac(tp->asoc, nskb, auth, packet->auth->shkey, gfp); /* free auth if no more chunks, or add it back */ if (list_empty(&packet->chunk_list)) sctp_chunk_free(packet->auth); else list_add(&packet->auth->list, &packet->chunk_list); } if (gso) sctp_packet_gso_append(head, nskb); pkt_count++; } while (!list_empty(&packet->chunk_list)); if (gso) { memset(head->cb, 0, max(sizeof(struct inet_skb_parm), sizeof(struct inet6_skb_parm))); skb_shinfo(head)->gso_segs = pkt_count; skb_shinfo(head)->gso_size = GSO_BY_FRAGS; goto chksum; } if (sctp_checksum_disable) return 1; if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) || dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) { struct sctphdr *sh = (struct sctphdr *)skb_transport_header(head); sh->checksum = sctp_compute_cksum(head, 0); } else { chksum: head->ip_summed = CHECKSUM_PARTIAL; head->csum_not_inet = 1; head->csum_start = skb_transport_header(head) - head->head; head->csum_offset = offsetof(struct sctphdr, checksum); } return pkt_count; } /* All packets are sent to the network through this function from * sctp_outq_tail(). * * The return value is always 0 for now. */ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) { struct sctp_transport *tp = packet->transport; struct sctp_association *asoc = tp->asoc; struct sctp_chunk *chunk, *tmp; int pkt_count, gso = 0; struct sk_buff *head; struct sctphdr *sh; struct sock *sk; pr_debug("%s: packet:%p\n", __func__, packet); if (list_empty(&packet->chunk_list)) return 0; chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); sk = chunk->skb->sk; if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) { if (tp->pl.state == SCTP_PL_ERROR) { /* do IP fragmentation if in Error state */ packet->ipfragok = 1; } else { if (!sk_can_gso(sk)) { /* check gso */ pr_err_once("Trying to GSO but underlying device doesn't support it."); goto out; } gso = 1; } } /* alloc head skb */ head = alloc_skb((gso ? 
packet->overhead : packet->size) + MAX_HEADER, gfp); if (!head) goto out; skb_reserve(head, packet->overhead + MAX_HEADER); skb_set_owner_w(head, sk); /* set sctp header */ sh = skb_push(head, sizeof(struct sctphdr)); skb_reset_transport_header(head); sh->source = htons(packet->source_port); sh->dest = htons(packet->destination_port); sh->vtag = htonl(packet->vtag); sh->checksum = 0; /* drop packet if no dst */ if (!tp->dst) { IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); kfree_skb(head); goto out; } /* pack up chunks */ pkt_count = sctp_packet_pack(packet, head, gso, gfp); if (!pkt_count) { kfree_skb(head); goto out; } pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len); /* start autoclose timer */ if (packet->has_data && sctp_state(asoc, ESTABLISHED) && asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { struct timer_list *timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; unsigned long timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; if (!mod_timer(timer, jiffies + timeout)) sctp_association_hold(asoc); } /* sctp xmit */ tp->af_specific->ecn_capable(sk); if (asoc) { asoc->stats.opackets += pkt_count; if (asoc->peer.last_sent_to != tp) asoc->peer.last_sent_to = tp; } head->ignore_df = packet->ipfragok; if (tp->dst_pending_confirm) skb_set_dst_pending_confirm(head, 1); /* neighbour should be confirmed on successful transmission or * positive error */ if (tp->af_specific->sctp_xmit(head, tp) >= 0 && tp->dst_pending_confirm) tp->dst_pending_confirm = 0; out: list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { list_del_init(&chunk->list); if (!sctp_chunk_is_data(chunk)) sctp_chunk_free(chunk); } sctp_packet_reset(packet); return 0; } /******************************************************************** * 2nd Level Abstractions ********************************************************************/ /* This private function check to see if a chunk can be added */ static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet, struct sctp_chunk *chunk) { size_t datasize, rwnd, inflight, flight_size; struct sctp_transport *transport = packet->transport; struct sctp_association *asoc = transport->asoc; struct sctp_outq *q = &asoc->outqueue; /* RFC 2960 6.1 Transmission of DATA Chunks * * A) At any given time, the data sender MUST NOT transmit new data to * any destination transport address if its peer's rwnd indicates * that the peer has no buffer space (i.e. rwnd is 0, see Section * 6.2.1). However, regardless of the value of rwnd (including if it * is 0), the data sender can always have one DATA chunk in flight to * the receiver if allowed by cwnd (see rule B below). This rule * allows the sender to probe for a change in rwnd that the sender * missed due to the SACK having been lost in transit from the data * receiver to the data sender. */ rwnd = asoc->peer.rwnd; inflight = q->outstanding_bytes; flight_size = transport->flight_size; datasize = sctp_data_size(chunk); if (datasize > rwnd && inflight > 0) /* We have (at least) one data chunk in flight, * so we can't fall back to rule 6.1 B). */ return SCTP_XMIT_RWND_FULL; /* RFC 2960 6.1 Transmission of DATA Chunks * * B) At any given time, the sender MUST NOT transmit new data * to a given transport address if it has cwnd or more bytes * of data outstanding to that transport address. */ /* RFC 7.2.4 & the Implementers Guide 2.8. * * 3) ... * When a Fast Retransmit is being performed the sender SHOULD * ignore the value of cwnd and SHOULD NOT delay retransmission. 
*/ if (chunk->fast_retransmit != SCTP_NEED_FRTX && flight_size >= transport->cwnd) return SCTP_XMIT_RWND_FULL; /* Nagle's algorithm to solve small-packet problem: * Inhibit the sending of new chunks when new outgoing data arrives * if any previously transmitted data on the connection remains * unacknowledged. */ if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && !asoc->force_delay) /* Nothing unacked */ return SCTP_XMIT_OK; if (!sctp_packet_empty(packet)) /* Append to packet */ return SCTP_XMIT_OK; if (!sctp_state(asoc, ESTABLISHED)) return SCTP_XMIT_OK; /* Check whether this chunk and all the rest of pending data will fit * or delay in hopes of bundling a full sized packet. */ if (chunk->skb->len + q->out_qlen > transport->pathmtu - packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4) /* Enough data queued to fill a packet */ return SCTP_XMIT_OK; /* Don't delay large message writes that may have been fragmented */ if (!chunk->msg->can_delay) return SCTP_XMIT_OK; /* Defer until all data acked or packet full */ return SCTP_XMIT_DELAY; } /* This private function does management things when adding DATA chunk */ static void sctp_packet_append_data(struct sctp_packet *packet, struct sctp_chunk *chunk) { struct sctp_transport *transport = packet->transport; size_t datasize = sctp_data_size(chunk); struct sctp_association *asoc = transport->asoc; u32 rwnd = asoc->peer.rwnd; /* Keep track of how many bytes are in flight over this transport. */ transport->flight_size += datasize; /* Keep track of how many bytes are in flight to the receiver. */ asoc->outqueue.outstanding_bytes += datasize; /* Update our view of the receiver's rwnd. */ if (datasize < rwnd) rwnd -= datasize; else rwnd = 0; asoc->peer.rwnd = rwnd; sctp_chunk_assign_tsn(chunk); asoc->stream.si->assign_number(chunk); } static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet, struct sctp_chunk *chunk, u16 chunk_len) { enum sctp_xmit retval = SCTP_XMIT_OK; size_t psize, pmtu, maxsize; /* Don't bundle in this packet if this chunk's auth key doesn't * match other chunks already enqueued on this packet. Also, * don't bundle the chunk with auth key if other chunks in this * packet don't have auth key. */ if ((packet->auth && chunk->shkey != packet->auth->shkey) || (!packet->auth && chunk->shkey && chunk->chunk_hdr->type != SCTP_CID_AUTH)) return SCTP_XMIT_PMTU_FULL; psize = packet->size; if (packet->transport->asoc) pmtu = packet->transport->asoc->pathmtu; else pmtu = packet->transport->pathmtu; /* Decide if we need to fragment or resubmit later. */ if (psize + chunk_len > pmtu) { /* It's OK to fragment at IP level if any one of the following * is true: * 1. The packet is empty (meaning this chunk is greater * the MTU) * 2. The packet doesn't have any data in it yet and data * requires authentication. */ if (sctp_packet_empty(packet) || (!packet->has_data && chunk->auth)) { /* We no longer do re-fragmentation. * Just fragment at the IP layer, if we * actually hit this condition */ packet->ipfragok = 1; goto out; } /* Similarly, if this chunk was built before a PMTU * reduction, we have to fragment it at IP level now. So * if the packet already contains something, we need to * flush. 
*/ maxsize = pmtu - packet->overhead; if (packet->auth) maxsize -= SCTP_PAD4(packet->auth->skb->len); if (chunk_len > maxsize) retval = SCTP_XMIT_PMTU_FULL; /* It is also okay to fragment if the chunk we are * adding is a control chunk, but only if current packet * is not a GSO one otherwise it causes fragmentation of * a large frame. So in this case we allow the * fragmentation by forcing it to be in a new packet. */ if (!sctp_chunk_is_data(chunk) && packet->has_data) retval = SCTP_XMIT_PMTU_FULL; if (psize + chunk_len > packet->max_size) /* Hit GSO/PMTU limit, gotta flush */ retval = SCTP_XMIT_PMTU_FULL; if (!packet->transport->burst_limited && psize + chunk_len > (packet->transport->cwnd >> 1)) /* Do not allow a single GSO packet to use more * than half of cwnd. */ retval = SCTP_XMIT_PMTU_FULL; if (packet->transport->burst_limited && psize + chunk_len > (packet->transport->burst_limited >> 1)) /* Do not allow a single GSO packet to use more * than half of original cwnd. */ retval = SCTP_XMIT_PMTU_FULL; /* Otherwise it will fit in the GSO packet */ } out: return retval; }
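The Nagle-style check in sctp_packet_can_append_data() honours sctp_sk(asoc->base.sk)->nodelay, which userspace toggles with the SCTP_NODELAY socket option. A hedged userspace sketch (requires the lksctp headers; peer addresses and error handling are omitted):

/* Userspace sketch: disable the Nagle-style bundling delay for small DATA
 * chunks, the condition tested in sctp_packet_can_append_data(). */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
	int on = 1;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0) {
		perror("sctp socket");
		return 1;
	}
	/* sets sp->nodelay, so small chunks are sent without waiting */
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on)) < 0)
		perror("SCTP_NODELAY");
	return 0;
}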
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_TEXT_PATCHING_H #define _ASM_X86_TEXT_PATCHING_H #include <linux/types.h> #include <linux/stddef.h> #include <asm/ptrace.h> /* * Currently, the max observed size in the kernel code is * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5. * Raise it if needed. */ #define POKE_MAX_OPCODE_SIZE 5 extern void text_poke_early(void *addr, const void *opcode, size_t len); extern void apply_relocation(u8 *buf, const u8 * const instr, size_t instrlen, u8 *repl, size_t repl_len); /* * Clear and restore the kernel write-protection flag on the local CPU. * Allows the kernel to edit read-only pages. * Side-effect: any interrupt handler running between save and restore will have * the ability to write to read-only pages. * * Warning: * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and * no thread can be preempted in the instructions being modified (no iret to an * invalid instruction possible) or if the instructions are changed from a * consistent state to another consistent state atomically. * On the local CPU you need to be protected against NMI or MCE handlers seeing * an inconsistent instruction while you patch.
*/ extern void *text_poke(void *addr, const void *opcode, size_t len); extern void text_poke_sync(void); extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len); extern void *text_poke_copy(void *addr, const void *opcode, size_t len); #define text_poke_copy text_poke_copy extern void *text_poke_copy_locked(void *addr, const void *opcode, size_t len, bool core_ok); extern void *text_poke_set(void *addr, int c, size_t len); extern int poke_int3_handler(struct pt_regs *regs); extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate); extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate); extern void text_poke_finish(void); #define INT3_INSN_SIZE 1 #define INT3_INSN_OPCODE 0xCC #define RET_INSN_SIZE 1 #define RET_INSN_OPCODE 0xC3 #define CALL_INSN_SIZE 5 #define CALL_INSN_OPCODE 0xE8 #define JMP32_INSN_SIZE 5 #define JMP32_INSN_OPCODE 0xE9 #define JMP8_INSN_SIZE 2 #define JMP8_INSN_OPCODE 0xEB #define DISP32_SIZE 4 static __always_inline int text_opcode_size(u8 opcode) { int size = 0; #define __CASE(insn) \ case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break switch(opcode) { __CASE(INT3); __CASE(RET); __CASE(CALL); __CASE(JMP32); __CASE(JMP8); } #undef __CASE return size; } union text_poke_insn { u8 text[POKE_MAX_OPCODE_SIZE]; struct { u8 opcode; s32 disp; } __attribute__((packed)); }; static __always_inline void __text_gen_insn(void *buf, u8 opcode, const void *addr, const void *dest, int size) { union text_poke_insn *insn = buf; BUG_ON(size < text_opcode_size(opcode)); /* * Hide the addresses to avoid the compiler folding in constants when * referencing code, these can mess up annotations like * ANNOTATE_NOENDBR. */ OPTIMIZER_HIDE_VAR(insn); OPTIMIZER_HIDE_VAR(addr); OPTIMIZER_HIDE_VAR(dest); insn->opcode = opcode; if (size > 1) { insn->disp = (long)dest - (long)(addr + size); if (size == 2) { /* * Ensure that for JMP8 the displacement * actually fits the signed byte. */ BUG_ON((insn->disp >> 31) != (insn->disp >> 7)); } } } static __always_inline void *text_gen_insn(u8 opcode, const void *addr, const void *dest) { static union text_poke_insn insn; /* per instance */ __text_gen_insn(&insn, opcode, addr, dest, text_opcode_size(opcode)); return &insn.text; } extern int after_bootmem; extern __ro_after_init struct mm_struct *poking_mm; extern __ro_after_init unsigned long poking_addr; #ifndef CONFIG_UML_X86 static __always_inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip) { regs->ip = ip; } static __always_inline void int3_emulate_push(struct pt_regs *regs, unsigned long val) { /* * The int3 handler in entry_64.S adds a gap between the * stack where the break point happened, and the saving of * pt_regs. We can extend the original stack because of * this gap. See the idtentry macro's create_gap option. * * Similarly entry_32.S will have a gap on the stack for (any) hardware * exception and pt_regs; see FIXUP_FRAME. 
*/ regs->sp -= sizeof(unsigned long); *(unsigned long *)regs->sp = val; } static __always_inline unsigned long int3_emulate_pop(struct pt_regs *regs) { unsigned long val = *(unsigned long *)regs->sp; regs->sp += sizeof(unsigned long); return val; } static __always_inline void int3_emulate_call(struct pt_regs *regs, unsigned long func) { int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE); int3_emulate_jmp(regs, func); } static __always_inline void int3_emulate_ret(struct pt_regs *regs) { unsigned long ip = int3_emulate_pop(regs); int3_emulate_jmp(regs, ip); } static __always_inline void int3_emulate_jcc(struct pt_regs *regs, u8 cc, unsigned long ip, unsigned long disp) { static const unsigned long jcc_mask[6] = { [0] = X86_EFLAGS_OF, [1] = X86_EFLAGS_CF, [2] = X86_EFLAGS_ZF, [3] = X86_EFLAGS_CF | X86_EFLAGS_ZF, [4] = X86_EFLAGS_SF, [5] = X86_EFLAGS_PF, }; bool invert = cc & 1; bool match; if (cc < 0xc) { match = regs->flags & jcc_mask[cc >> 1]; } else { match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^ ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT); if (cc >= 0xe) match = match || (regs->flags & X86_EFLAGS_ZF); } if ((match && !invert) || (!match && invert)) ip += disp; int3_emulate_jmp(regs, ip); } #endif /* !CONFIG_UML_X86 */ #endif /* _ASM_X86_TEXT_PATCHING_H */
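__text_gen_insn() above encodes the branch displacement relative to the end of the instruction, i.e. dest - (addr + size). The following stand-alone sketch reproduces the same arithmetic for a 5-byte E8 CALL under a flat-address assumption; encode_call() is a hypothetical helper used only for this example, not part of the header.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CALL_OPCODE 0xE8
#define CALL_SIZE   5

static void encode_call(uint8_t buf[CALL_SIZE], uintptr_t addr, uintptr_t dest)
{
	/* rel32 is relative to the end of the instruction, i.e. addr + 5 */
	int32_t disp = (int32_t)(dest - (addr + CALL_SIZE));

	buf[0] = CALL_OPCODE;
	memcpy(&buf[1], &disp, sizeof(disp));
}

int main(void)
{
	uint8_t insn[CALL_SIZE];

	encode_call(insn, 0x1000, 0x2000); /* disp = 0x2000 - 0x1005 = 0xffb */
	for (int i = 0; i < CALL_SIZE; i++)
		printf("%02x ", insn[i]);     /* prints: e8 fb 0f 00 00 */
	printf("\n");
	return 0;
}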
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Supervisor Mode Access Prevention support * * Copyright (C) 2012 Intel Corporation * Author: H. Peter Anvin <hpa@linux.intel.com> */ #ifndef _ASM_X86_SMAP_H #define _ASM_X86_SMAP_H #include <asm/nops.h> #include <asm/cpufeatures.h> #include <asm/alternative.h> #ifdef __ASSEMBLY__ #define ASM_CLAC \ ALTERNATIVE "", "clac", X86_FEATURE_SMAP #define ASM_STAC \ ALTERNATIVE "", "stac", X86_FEATURE_SMAP #else /* __ASSEMBLY__ */ static __always_inline void clac(void) { /* Note: a barrier is implicit in alternative() */ alternative("", "clac", X86_FEATURE_SMAP); } static __always_inline void stac(void) { /* Note: a barrier is implicit in alternative() */ alternative("", "stac", X86_FEATURE_SMAP); } static __always_inline unsigned long smap_save(void) { unsigned long flags; asm volatile ("# smap_save\n\t" ALTERNATIVE("", "pushf; pop %0; " "clac" "\n\t", X86_FEATURE_SMAP) : "=rm" (flags) : : "memory", "cc"); return flags; } static __always_inline void smap_restore(unsigned long flags) { asm volatile ("# smap_restore\n\t" ALTERNATIVE("", "push %0; popf\n\t", X86_FEATURE_SMAP) : : "g" (flags) : "memory", "cc"); } /* These macros can be used in asm() statements */ #define ASM_CLAC \ ALTERNATIVE("", "clac", X86_FEATURE_SMAP) #define ASM_STAC \ ALTERNATIVE("", "stac", X86_FEATURE_SMAP) #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_SMAP_H */
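A hedged usage sketch of the non-assembly helpers above: callers open the SMAP window only around the actual user-memory access so that EFLAGS.AC stays set for the shortest possible time. This fragment is illustrative and not buildable on its own; do_user_load() is a hypothetical placeholder for the real access, which the kernel performs via inline assembly in its uaccess helpers.

static __always_inline unsigned long read_user_word_sketch(const unsigned long __user *src)
{
	unsigned long val;

	stac();                  /* set EFLAGS.AC: user accesses temporarily allowed */
	val = do_user_load(src); /* hypothetical stand-in for the actual load */
	clac();                  /* clear EFLAGS.AC again as soon as possible */

	return val;
}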
// SPDX-License-Identifier: GPL-2.0+ /* * REINER SCT cyberJack pinpad/e-com USB Chipcard Reader Driver * * Copyright (C) 2001 REINER SCT * Author: Matthias Bruestle * * Contact: support@reiner-sct.com (see MAINTAINERS) * * This program is largely derived from work by the linux-usb group * and associated source files. Please see the usb/serial files for * individual credits and copyrights. * * Thanks to Greg Kroah-Hartman (greg@kroah.com) for his help and * patience. * * In case of problems, please write to the contact e-mail address * mentioned above. * * Please note that later models of the cyberjack reader family are * supported by a libusb-based userspace device driver.
* * Homepage: http://www.reiner-sct.de/support/treiber_cyberjack.php#linux */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define CYBERJACK_LOCAL_BUF_SIZE 32 #define DRIVER_AUTHOR "Matthias Bruestle" #define DRIVER_DESC "REINER SCT cyberJack pinpad/e-com USB Chipcard Reader Driver" #define CYBERJACK_VENDOR_ID 0x0C4B #define CYBERJACK_PRODUCT_ID 0x0100 /* Function prototypes */ static int cyberjack_port_probe(struct usb_serial_port *port); static void cyberjack_port_remove(struct usb_serial_port *port); static int cyberjack_open(struct tty_struct *tty, struct usb_serial_port *port); static void cyberjack_close(struct usb_serial_port *port); static int cyberjack_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count); static unsigned int cyberjack_write_room(struct tty_struct *tty); static void cyberjack_read_int_callback(struct urb *urb); static void cyberjack_read_bulk_callback(struct urb *urb); static void cyberjack_write_bulk_callback(struct urb *urb); static const struct usb_device_id id_table[] = { { USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver cyberjack_device = { .driver = { .name = "cyberjack", }, .description = "Reiner SCT Cyberjack USB card reader", .id_table = id_table, .num_ports = 1, .num_bulk_out = 1, .port_probe = cyberjack_port_probe, .port_remove = cyberjack_port_remove, .open = cyberjack_open, .close = cyberjack_close, .write = cyberjack_write, .write_room = cyberjack_write_room, .read_int_callback = cyberjack_read_int_callback, .read_bulk_callback = cyberjack_read_bulk_callback, .write_bulk_callback = cyberjack_write_bulk_callback, }; static struct usb_serial_driver * const serial_drivers[] = { &cyberjack_device, NULL }; struct cyberjack_private { spinlock_t lock; /* Lock for SMP */ short rdtodo; /* Bytes still to read */ unsigned char wrbuf[5*64]; /* Buffer for collecting data to write */ short wrfilled; /* Overall data size we already got */ short wrsent; /* Data already sent */ }; static int cyberjack_port_probe(struct usb_serial_port *port) { struct cyberjack_private *priv; int result; priv = kmalloc(sizeof(struct cyberjack_private), GFP_KERNEL); if (!priv) return -ENOMEM; spin_lock_init(&priv->lock); priv->rdtodo = 0; priv->wrfilled = 0; priv->wrsent = 0; usb_set_serial_port_data(port, priv); result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) dev_err(&port->dev, "usb_submit_urb(read int) failed\n"); return 0; } static void cyberjack_port_remove(struct usb_serial_port *port) { struct cyberjack_private *priv; usb_kill_urb(port->interrupt_in_urb); priv = usb_get_serial_port_data(port); kfree(priv); } static int cyberjack_open(struct tty_struct *tty, struct usb_serial_port *port) { struct cyberjack_private *priv; unsigned long flags; dev_dbg(&port->dev, "%s - usb_clear_halt\n", __func__); usb_clear_halt(port->serial->dev, port->write_urb->pipe); priv = usb_get_serial_port_data(port); spin_lock_irqsave(&priv->lock, flags); priv->rdtodo = 0; priv->wrfilled = 0; priv->wrsent = 0; spin_unlock_irqrestore(&priv->lock, flags); return 0; } static void cyberjack_close(struct usb_serial_port *port) { usb_kill_urb(port->write_urb); usb_kill_urb(port->read_urb); } static 
int cyberjack_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct device *dev = &port->dev; struct cyberjack_private *priv = usb_get_serial_port_data(port); unsigned long flags; int result; int wrexpected; if (count == 0) { dev_dbg(dev, "%s - write request of 0 bytes\n", __func__); return 0; } if (!test_and_clear_bit(0, &port->write_urbs_free)) { dev_dbg(dev, "%s - already writing\n", __func__); return 0; } spin_lock_irqsave(&priv->lock, flags); if (count+priv->wrfilled > sizeof(priv->wrbuf)) { /* To much data for buffer. Reset buffer. */ priv->wrfilled = 0; spin_unlock_irqrestore(&priv->lock, flags); set_bit(0, &port->write_urbs_free); return 0; } /* Copy data */ memcpy(priv->wrbuf + priv->wrfilled, buf, count); usb_serial_debug_data(dev, __func__, count, priv->wrbuf + priv->wrfilled); priv->wrfilled += count; if (priv->wrfilled >= 3) { wrexpected = ((int)priv->wrbuf[2]<<8)+priv->wrbuf[1]+3; dev_dbg(dev, "%s - expected data: %d\n", __func__, wrexpected); } else wrexpected = sizeof(priv->wrbuf); if (priv->wrfilled >= wrexpected) { /* We have enough data to begin transmission */ int length; dev_dbg(dev, "%s - transmitting data (frame 1)\n", __func__); length = (wrexpected > port->bulk_out_size) ? port->bulk_out_size : wrexpected; memcpy(port->write_urb->transfer_buffer, priv->wrbuf, length); priv->wrsent = length; /* set up our urb */ port->write_urb->transfer_buffer_length = length; /* send the data out the bulk port */ result = usb_submit_urb(port->write_urb, GFP_ATOMIC); if (result) { dev_err(&port->dev, "%s - failed submitting write urb, error %d\n", __func__, result); /* Throw away data. No better idea what to do with it. */ priv->wrfilled = 0; priv->wrsent = 0; spin_unlock_irqrestore(&priv->lock, flags); set_bit(0, &port->write_urbs_free); return 0; } dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent); dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled); if (priv->wrsent >= priv->wrfilled) { dev_dbg(dev, "%s - buffer cleaned\n", __func__); memset(priv->wrbuf, 0, sizeof(priv->wrbuf)); priv->wrfilled = 0; priv->wrsent = 0; } } spin_unlock_irqrestore(&priv->lock, flags); return count; } static unsigned int cyberjack_write_room(struct tty_struct *tty) { /* FIXME: .... */ return CYBERJACK_LOCAL_BUF_SIZE; } static void cyberjack_read_int_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct cyberjack_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; unsigned char *data = urb->transfer_buffer; int status = urb->status; unsigned long flags; int result; /* the urb might have been killed. */ if (status) return; usb_serial_debug_data(dev, __func__, urb->actual_length, data); /* React only to interrupts signaling a bulk_in transfer */ if (urb->actual_length == 4 && data[0] == 0x01) { short old_rdtodo; /* This is a announcement of coming bulk_ins. 
*/ unsigned short size = ((unsigned short)data[3]<<8)+data[2]+3; spin_lock_irqsave(&priv->lock, flags); old_rdtodo = priv->rdtodo; if (old_rdtodo > SHRT_MAX - size) { dev_dbg(dev, "Too many bulk_in urbs to do.\n"); spin_unlock_irqrestore(&priv->lock, flags); goto resubmit; } /* "+=" is probably more fault tolerant than "=" */ priv->rdtodo += size; dev_dbg(dev, "%s - rdtodo: %d\n", __func__, priv->rdtodo); spin_unlock_irqrestore(&priv->lock, flags); if (!old_rdtodo) { result = usb_submit_urb(port->read_urb, GFP_ATOMIC); if (result) dev_err(dev, "%s - failed resubmitting read urb, error %d\n", __func__, result); dev_dbg(dev, "%s - usb_submit_urb(read urb)\n", __func__); } } resubmit: result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); if (result) dev_err(&port->dev, "usb_submit_urb(read int) failed\n"); dev_dbg(dev, "%s - usb_submit_urb(int urb)\n", __func__); } static void cyberjack_read_bulk_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct cyberjack_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; unsigned char *data = urb->transfer_buffer; unsigned long flags; short todo; int result; int status = urb->status; usb_serial_debug_data(dev, __func__, urb->actual_length, data); if (status) { dev_dbg(dev, "%s - nonzero read bulk status received: %d\n", __func__, status); return; } if (urb->actual_length) { tty_insert_flip_string(&port->port, data, urb->actual_length); tty_flip_buffer_push(&port->port); } spin_lock_irqsave(&priv->lock, flags); /* Reduce urbs to do by one. */ priv->rdtodo -= urb->actual_length; /* Just to be sure */ if (priv->rdtodo < 0) priv->rdtodo = 0; todo = priv->rdtodo; spin_unlock_irqrestore(&priv->lock, flags); dev_dbg(dev, "%s - rdtodo: %d\n", __func__, todo); /* Continue to read if we have still urbs to do. */ if (todo /* || (urb->actual_length==port->bulk_in_endpointAddress)*/) { result = usb_submit_urb(port->read_urb, GFP_ATOMIC); if (result) dev_err(dev, "%s - failed resubmitting read urb, error %d\n", __func__, result); dev_dbg(dev, "%s - usb_submit_urb(read urb)\n", __func__); } } static void cyberjack_write_bulk_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct cyberjack_private *priv = usb_get_serial_port_data(port); struct device *dev = &port->dev; int status = urb->status; unsigned long flags; bool resubmitted = false; if (status) { dev_dbg(dev, "%s - nonzero write bulk status received: %d\n", __func__, status); set_bit(0, &port->write_urbs_free); return; } spin_lock_irqsave(&priv->lock, flags); /* only do something if we have more data to send */ if (priv->wrfilled) { int length, blksize, result; dev_dbg(dev, "%s - transmitting data (frame n)\n", __func__); length = ((priv->wrfilled - priv->wrsent) > port->bulk_out_size) ? port->bulk_out_size : (priv->wrfilled - priv->wrsent); memcpy(port->write_urb->transfer_buffer, priv->wrbuf + priv->wrsent, length); priv->wrsent += length; /* set up our urb */ port->write_urb->transfer_buffer_length = length; /* send the data out the bulk port */ result = usb_submit_urb(port->write_urb, GFP_ATOMIC); if (result) { dev_err(dev, "%s - failed submitting write urb, error %d\n", __func__, result); /* Throw away data. No better idea what to do with it. 
*/ priv->wrfilled = 0; priv->wrsent = 0; goto exit; } resubmitted = true; dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent); dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled); blksize = ((int)priv->wrbuf[2]<<8)+priv->wrbuf[1]+3; if (priv->wrsent >= priv->wrfilled || priv->wrsent >= blksize) { dev_dbg(dev, "%s - buffer cleaned\n", __func__); memset(priv->wrbuf, 0, sizeof(priv->wrbuf)); priv->wrfilled = 0; priv->wrsent = 0; } } exit: spin_unlock_irqrestore(&priv->lock, flags); if (!resubmitted) set_bit(0, &port->write_urbs_free); usb_serial_port_softint(port); } module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
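cyberjack_write() above only starts a bulk transfer once a complete frame is buffered, deriving the expected frame length from the 3-byte header: wrbuf[1] holds the low byte and wrbuf[2] the high byte of the payload length, plus the 3 header bytes. A stand-alone sketch of that computation; frame_expected_len() is a hypothetical name used only for this illustration.

#include <stdio.h>

/* Header layout as used by cyberjack_write(): buf[1] = length low byte,
 * buf[2] = length high byte; a full frame is 3 header bytes + payload. */
static int frame_expected_len(const unsigned char *buf)
{
	return ((int)buf[2] << 8) + buf[1] + 3;
}

int main(void)
{
	unsigned char hdr[3] = { 0x00, 0x20, 0x01 }; /* payload length 0x0120 = 288 */

	printf("expected frame length: %d\n", frame_expected_len(hdr)); /* 291 */
	return 0;
}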
// SPDX-License-Identifier: GPL-2.0-only /* * Monitoring code for network dropped packet alerts * * Copyright (C) 2009 Neil Horman <nhorman@tuxdriver.com> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/string.h> #include <linux/if_arp.h> #include <linux/inetdevice.h> #include <linux/inet.h> #include <linux/interrupt.h> #include <linux/netpoll.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/workqueue.h> #include <linux/netlink.h> #include <linux/net_dropmon.h> #include <linux/bitfield.h> #include <linux/percpu.h> #include <linux/timer.h> #include <linux/bitops.h> #include <linux/slab.h> #include <linux/module.h> #include <net/genetlink.h> #include <net/netevent.h> #include <net/flow_offload.h> #include <net/dropreason.h> #include <net/devlink.h> #include <trace/events/skb.h> #include <trace/events/napi.h> #include <trace/events/devlink.h> #include <linux/unaligned.h> #define TRACE_ON 1 #define TRACE_OFF 0 /* * Globals, our netlink socket pointer * and the work handle that will send up * netlink alerts */ static int trace_state = TRACE_OFF; static bool monitor_hw; /* net_dm_mutex * * An overall lock guarding every operation coming from userspace.
*/ static DEFINE_MUTEX(net_dm_mutex); struct net_dm_stats { u64_stats_t dropped; struct u64_stats_sync syncp; }; #define NET_DM_MAX_HW_TRAP_NAME_LEN 40 struct net_dm_hw_entry { char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN]; u32 count; }; struct net_dm_hw_entries { u32 num_entries; struct net_dm_hw_entry entries[]; }; struct per_cpu_dm_data { raw_spinlock_t lock; /* Protects 'skb', 'hw_entries' and * 'send_timer' */ union { struct sk_buff *skb; struct net_dm_hw_entries *hw_entries; }; struct sk_buff_head drop_queue; struct work_struct dm_alert_work; struct timer_list send_timer; struct net_dm_stats stats; }; struct dm_hw_stat_delta { unsigned long last_rx; unsigned long last_drop_val; struct rcu_head rcu; }; static struct genl_family net_drop_monitor_family; static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data); static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_hw_cpu_data); static int dm_hit_limit = 64; static int dm_delay = 1; static unsigned long dm_hw_check_delta = 2*HZ; static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY; static u32 net_dm_trunc_len; static u32 net_dm_queue_len = 1000; struct net_dm_alert_ops { void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb, void *location, enum skb_drop_reason reason, struct sock *rx_sk); void (*napi_poll_probe)(void *ignore, struct napi_struct *napi, int work, int budget); void (*work_item_func)(struct work_struct *work); void (*hw_work_item_func)(struct work_struct *work); void (*hw_trap_probe)(void *ignore, const struct devlink *devlink, struct sk_buff *skb, const struct devlink_trap_metadata *metadata); }; struct net_dm_skb_cb { union { struct devlink_trap_metadata *hw_metadata; void *pc; }; enum skb_drop_reason reason; }; #define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0])) static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) { size_t al; struct net_dm_alert_msg *msg; struct nlattr *nla; struct sk_buff *skb; unsigned long flags; void *msg_header; al = sizeof(struct net_dm_alert_msg); al += dm_hit_limit * sizeof(struct net_dm_drop_point); al += sizeof(struct nlattr); skb = genlmsg_new(al, GFP_KERNEL); if (!skb) goto err; msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family, 0, NET_DM_CMD_ALERT); if (!msg_header) { nlmsg_free(skb); skb = NULL; goto err; } nla = nla_reserve(skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg)); if (!nla) { nlmsg_free(skb); skb = NULL; goto err; } msg = nla_data(nla); memset(msg, 0, al); goto out; err: mod_timer(&data->send_timer, jiffies + HZ / 10); out: raw_spin_lock_irqsave(&data->lock, flags); swap(data->skb, skb); raw_spin_unlock_irqrestore(&data->lock, flags); if (skb) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh); genlmsg_end(skb, genlmsg_data(gnlh)); } return skb; } static const struct genl_multicast_group dropmon_mcgrps[] = { { .name = "events", .flags = GENL_MCAST_CAP_SYS_ADMIN, }, }; static void send_dm_alert(struct work_struct *work) { struct sk_buff *skb; struct per_cpu_dm_data *data; data = container_of(work, struct per_cpu_dm_data, dm_alert_work); skb = reset_per_cpu_data(data); if (skb) genlmsg_multicast(&net_drop_monitor_family, skb, 0, 0, GFP_KERNEL); } /* * This is the timer function to delay the sending of an alert * in the event that more drops will arrive during the * hysteresis period. 
*/ static void sched_send_work(struct timer_list *t) { struct per_cpu_dm_data *data = from_timer(data, t, send_timer); schedule_work(&data->dm_alert_work); } static void trace_drop_common(struct sk_buff *skb, void *location) { struct net_dm_alert_msg *msg; struct net_dm_drop_point *point; struct nlmsghdr *nlh; struct nlattr *nla; int i; struct sk_buff *dskb; struct per_cpu_dm_data *data; unsigned long flags; local_irq_save(flags); data = this_cpu_ptr(&dm_cpu_data); raw_spin_lock(&data->lock); dskb = data->skb; if (!dskb) goto out; nlh = (struct nlmsghdr *)dskb->data; nla = genlmsg_data(nlmsg_data(nlh)); msg = nla_data(nla); point = msg->points; for (i = 0; i < msg->entries; i++) { if (!memcmp(&location, &point->pc, sizeof(void *))) { point->count++; goto out; } point++; } if (msg->entries == dm_hit_limit) goto out; /* * We need to create a new entry */ __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); memcpy(point->pc, &location, sizeof(void *)); point->count = 1; msg->entries++; if (!timer_pending(&data->send_timer)) { data->send_timer.expires = jiffies + dm_delay * HZ; add_timer(&data->send_timer); } out: raw_spin_unlock_irqrestore(&data->lock, flags); } static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location, enum skb_drop_reason reason, struct sock *rx_sk) { trace_drop_common(skb, location); } static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi, int work, int budget) { struct net_device *dev = napi->dev; struct dm_hw_stat_delta *stat; /* * Don't check napi structures with no associated device */ if (!dev) return; rcu_read_lock(); stat = rcu_dereference(dev->dm_private); if (stat) { /* * only add a note to our monitor buffer if: * 1) its after the last_rx delta * 2) our rx_dropped count has gone up */ if (time_after(jiffies, stat->last_rx + dm_hw_check_delta) && (dev->stats.rx_dropped != stat->last_drop_val)) { trace_drop_common(NULL, NULL); stat->last_drop_val = dev->stats.rx_dropped; stat->last_rx = jiffies; } } rcu_read_unlock(); } static struct net_dm_hw_entries * net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data) { struct net_dm_hw_entries *hw_entries; unsigned long flags; hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit), GFP_KERNEL); if (!hw_entries) { /* If the memory allocation failed, we try to perform another * allocation in 1/10 second. Otherwise, the probe function * will constantly bail out. 
*/ mod_timer(&hw_data->send_timer, jiffies + HZ / 10); } raw_spin_lock_irqsave(&hw_data->lock, flags); swap(hw_data->hw_entries, hw_entries); raw_spin_unlock_irqrestore(&hw_data->lock, flags); return hw_entries; } static int net_dm_hw_entry_put(struct sk_buff *msg, const struct net_dm_hw_entry *hw_entry) { struct nlattr *attr; attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY); if (!attr) return -EMSGSIZE; if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name)) goto nla_put_failure; if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count)) goto nla_put_failure; nla_nest_end(msg, attr); return 0; nla_put_failure: nla_nest_cancel(msg, attr); return -EMSGSIZE; } static int net_dm_hw_entries_put(struct sk_buff *msg, const struct net_dm_hw_entries *hw_entries) { struct nlattr *attr; int i; attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES); if (!attr) return -EMSGSIZE; for (i = 0; i < hw_entries->num_entries; i++) { int rc; rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]); if (rc) goto nla_put_failure; } nla_nest_end(msg, attr); return 0; nla_put_failure: nla_nest_cancel(msg, attr); return -EMSGSIZE; } static int net_dm_hw_summary_report_fill(struct sk_buff *msg, const struct net_dm_hw_entries *hw_entries) { struct net_dm_alert_msg anc_hdr = { 0 }; void *hdr; int rc; hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0, NET_DM_CMD_ALERT); if (!hdr) return -EMSGSIZE; /* We need to put the ancillary header in order not to break user * space. */ if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr)) goto nla_put_failure; rc = net_dm_hw_entries_put(msg, hw_entries); if (rc) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static void net_dm_hw_summary_work(struct work_struct *work) { struct net_dm_hw_entries *hw_entries; struct per_cpu_dm_data *hw_data; struct sk_buff *msg; int rc; hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work); hw_entries = net_dm_hw_reset_per_cpu_data(hw_data); if (!hw_entries) return; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) goto out; rc = net_dm_hw_summary_report_fill(msg, hw_entries); if (rc) { nlmsg_free(msg); goto out; } genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL); out: kfree(hw_entries); } static void net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink, struct sk_buff *skb, const struct devlink_trap_metadata *metadata) { struct net_dm_hw_entries *hw_entries; struct net_dm_hw_entry *hw_entry; struct per_cpu_dm_data *hw_data; unsigned long flags; int i; if (metadata->trap_type == DEVLINK_TRAP_TYPE_CONTROL) return; hw_data = this_cpu_ptr(&dm_hw_cpu_data); raw_spin_lock_irqsave(&hw_data->lock, flags); hw_entries = hw_data->hw_entries; if (!hw_entries) goto out; for (i = 0; i < hw_entries->num_entries; i++) { hw_entry = &hw_entries->entries[i]; if (!strncmp(hw_entry->trap_name, metadata->trap_name, NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) { hw_entry->count++; goto out; } } if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit)) goto out; hw_entry = &hw_entries->entries[hw_entries->num_entries]; strscpy(hw_entry->trap_name, metadata->trap_name, NET_DM_MAX_HW_TRAP_NAME_LEN - 1); hw_entry->count = 1; hw_entries->num_entries++; if (!timer_pending(&hw_data->send_timer)) { hw_data->send_timer.expires = jiffies + dm_delay * HZ; add_timer(&hw_data->send_timer); } out: raw_spin_unlock_irqrestore(&hw_data->lock, flags); } static const struct net_dm_alert_ops net_dm_alert_summary_ops = { .kfree_skb_probe = 
trace_kfree_skb_hit, .napi_poll_probe = trace_napi_poll_hit, .work_item_func = send_dm_alert, .hw_work_item_func = net_dm_hw_summary_work, .hw_trap_probe = net_dm_hw_trap_summary_probe, }; static void net_dm_packet_trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location, enum skb_drop_reason reason, struct sock *rx_sk) { ktime_t tstamp = ktime_get_real(); struct per_cpu_dm_data *data; struct net_dm_skb_cb *cb; struct sk_buff *nskb; unsigned long flags; if (!skb_mac_header_was_set(skb)) return; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return; cb = NET_DM_SKB_CB(nskb); cb->reason = reason; cb->pc = location; /* Override the timestamp because we care about the time when the * packet was dropped. */ nskb->tstamp = tstamp; data = this_cpu_ptr(&dm_cpu_data); spin_lock_irqsave(&data->drop_queue.lock, flags); if (skb_queue_len(&data->drop_queue) < net_dm_queue_len) __skb_queue_tail(&data->drop_queue, nskb); else goto unlock_free; spin_unlock_irqrestore(&data->drop_queue.lock, flags); schedule_work(&data->dm_alert_work); return; unlock_free: spin_unlock_irqrestore(&data->drop_queue.lock, flags); u64_stats_update_begin(&data->stats.syncp); u64_stats_inc(&data->stats.dropped); u64_stats_update_end(&data->stats.syncp); consume_skb(nskb); } static void net_dm_packet_trace_napi_poll_hit(void *ignore, struct napi_struct *napi, int work, int budget) { } static size_t net_dm_in_port_size(void) { /* NET_DM_ATTR_IN_PORT nest */ return nla_total_size(0) + /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */ nla_total_size(sizeof(u32)) + /* NET_DM_ATTR_PORT_NETDEV_NAME */ nla_total_size(IFNAMSIZ + 1); } #define NET_DM_MAX_SYMBOL_LEN 40 #define NET_DM_MAX_REASON_LEN 50 static size_t net_dm_packet_report_size(size_t payload_len) { size_t size; size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize); return NLMSG_ALIGN(size) + /* NET_DM_ATTR_ORIGIN */ nla_total_size(sizeof(u16)) + /* NET_DM_ATTR_PC */ nla_total_size(sizeof(u64)) + /* NET_DM_ATTR_SYMBOL */ nla_total_size(NET_DM_MAX_SYMBOL_LEN + 1) + /* NET_DM_ATTR_IN_PORT */ net_dm_in_port_size() + /* NET_DM_ATTR_TIMESTAMP */ nla_total_size(sizeof(u64)) + /* NET_DM_ATTR_ORIG_LEN */ nla_total_size(sizeof(u32)) + /* NET_DM_ATTR_PROTO */ nla_total_size(sizeof(u16)) + /* NET_DM_ATTR_REASON */ nla_total_size(NET_DM_MAX_REASON_LEN + 1) + /* NET_DM_ATTR_PAYLOAD */ nla_total_size(payload_len); } static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex, const char *name) { struct nlattr *attr; attr = nla_nest_start(msg, NET_DM_ATTR_IN_PORT); if (!attr) return -EMSGSIZE; if (ifindex && nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex)) goto nla_put_failure; if (name && nla_put_string(msg, NET_DM_ATTR_PORT_NETDEV_NAME, name)) goto nla_put_failure; nla_nest_end(msg, attr); return 0; nla_put_failure: nla_nest_cancel(msg, attr); return -EMSGSIZE; } static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb, size_t payload_len) { struct net_dm_skb_cb *cb = NET_DM_SKB_CB(skb); const struct drop_reason_list *list = NULL; unsigned int subsys, subsys_reason; char buf[NET_DM_MAX_SYMBOL_LEN]; struct nlattr *attr; void *hdr; int rc; hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0, NET_DM_CMD_PACKET_ALERT); if (!hdr) return -EMSGSIZE; if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW)) goto nla_put_failure; if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, (u64)(uintptr_t)cb->pc, NET_DM_ATTR_PAD)) goto nla_put_failure; rcu_read_lock(); subsys = u32_get_bits(cb->reason, SKB_DROP_REASON_SUBSYS_MASK); if (subsys < 
SKB_DROP_REASON_SUBSYS_NUM) list = rcu_dereference(drop_reasons_by_subsys[subsys]); subsys_reason = cb->reason & ~SKB_DROP_REASON_SUBSYS_MASK; if (!list || subsys_reason >= list->n_reasons || !list->reasons[subsys_reason] || strlen(list->reasons[subsys_reason]) > NET_DM_MAX_REASON_LEN) { list = rcu_dereference(drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_CORE]); subsys_reason = SKB_DROP_REASON_NOT_SPECIFIED; } if (nla_put_string(msg, NET_DM_ATTR_REASON, list->reasons[subsys_reason])) { rcu_read_unlock(); goto nla_put_failure; } rcu_read_unlock(); snprintf(buf, sizeof(buf), "%pS", cb->pc); if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf)) goto nla_put_failure; rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL); if (rc) goto nla_put_failure; if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP, ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD)) goto nla_put_failure; if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len)) goto nla_put_failure; if (!payload_len) goto out; if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol))) goto nla_put_failure; attr = skb_put(msg, nla_total_size(payload_len)); attr->nla_type = NET_DM_ATTR_PAYLOAD; attr->nla_len = nla_attr_size(payload_len); if (skb_copy_bits(skb, 0, nla_data(attr), payload_len)) goto nla_put_failure; out: genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } #define NET_DM_MAX_PACKET_SIZE (0xffff - NLA_HDRLEN - NLA_ALIGNTO) static void net_dm_packet_report(struct sk_buff *skb) { struct sk_buff *msg; size_t payload_len; int rc; /* Make sure we start copying the packet from the MAC header */ if (skb->data > skb_mac_header(skb)) skb_push(skb, skb->data - skb_mac_header(skb)); else skb_pull(skb, skb_mac_header(skb) - skb->data); /* Ensure packet fits inside a single netlink attribute */ payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE); if (net_dm_trunc_len) payload_len = min_t(size_t, net_dm_trunc_len, payload_len); msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL); if (!msg) goto out; rc = net_dm_packet_report_fill(msg, skb, payload_len); if (rc) { nlmsg_free(msg); goto out; } genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL); out: consume_skb(skb); } static void net_dm_packet_work(struct work_struct *work) { struct per_cpu_dm_data *data; struct sk_buff_head list; struct sk_buff *skb; unsigned long flags; data = container_of(work, struct per_cpu_dm_data, dm_alert_work); __skb_queue_head_init(&list); spin_lock_irqsave(&data->drop_queue.lock, flags); skb_queue_splice_tail_init(&data->drop_queue, &list); spin_unlock_irqrestore(&data->drop_queue.lock, flags); while ((skb = __skb_dequeue(&list))) net_dm_packet_report(skb); } static size_t net_dm_flow_action_cookie_size(const struct devlink_trap_metadata *hw_metadata) { return hw_metadata->fa_cookie ? 
nla_total_size(hw_metadata->fa_cookie->cookie_len) : 0; } static size_t net_dm_hw_packet_report_size(size_t payload_len, const struct devlink_trap_metadata *hw_metadata) { size_t size; size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize); return NLMSG_ALIGN(size) + /* NET_DM_ATTR_ORIGIN */ nla_total_size(sizeof(u16)) + /* NET_DM_ATTR_HW_TRAP_GROUP_NAME */ nla_total_size(strlen(hw_metadata->trap_group_name) + 1) + /* NET_DM_ATTR_HW_TRAP_NAME */ nla_total_size(strlen(hw_metadata->trap_name) + 1) + /* NET_DM_ATTR_IN_PORT */ net_dm_in_port_size() + /* NET_DM_ATTR_FLOW_ACTION_COOKIE */ net_dm_flow_action_cookie_size(hw_metadata) + /* NET_DM_ATTR_TIMESTAMP */ nla_total_size(sizeof(u64)) + /* NET_DM_ATTR_ORIG_LEN */ nla_total_size(sizeof(u32)) + /* NET_DM_ATTR_PROTO */ nla_total_size(sizeof(u16)) + /* NET_DM_ATTR_PAYLOAD */ nla_total_size(payload_len); } static int net_dm_hw_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb, size_t payload_len) { struct devlink_trap_metadata *hw_metadata; struct nlattr *attr; void *hdr; hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0, NET_DM_CMD_PACKET_ALERT); if (!hdr) return -EMSGSIZE; if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_HW)) goto nla_put_failure; if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_GROUP_NAME, hw_metadata->trap_group_name)) goto nla_put_failure; if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_metadata->trap_name)) goto nla_put_failure; if (hw_metadata->input_dev) { struct net_device *dev = hw_metadata->input_dev; int rc; rc = net_dm_packet_report_in_port_put(msg, dev->ifindex, dev->name); if (rc) goto nla_put_failure; } if (hw_metadata->fa_cookie && nla_put(msg, NET_DM_ATTR_FLOW_ACTION_COOKIE, hw_metadata->fa_cookie->cookie_len, hw_metadata->fa_cookie->cookie)) goto nla_put_failure; if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP, ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD)) goto nla_put_failure; if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len)) goto nla_put_failure; if (!payload_len) goto out; if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol))) goto nla_put_failure; attr = skb_put(msg, nla_total_size(payload_len)); attr->nla_type = NET_DM_ATTR_PAYLOAD; attr->nla_len = nla_attr_size(payload_len); if (skb_copy_bits(skb, 0, nla_data(attr), payload_len)) goto nla_put_failure; out: genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static struct devlink_trap_metadata * net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata) { const struct flow_action_cookie *fa_cookie; struct devlink_trap_metadata *hw_metadata; const char *trap_group_name; const char *trap_name; hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC); if (!hw_metadata) return NULL; trap_group_name = kstrdup(metadata->trap_group_name, GFP_ATOMIC); if (!trap_group_name) goto free_hw_metadata; hw_metadata->trap_group_name = trap_group_name; trap_name = kstrdup(metadata->trap_name, GFP_ATOMIC); if (!trap_name) goto free_trap_group; hw_metadata->trap_name = trap_name; if (metadata->fa_cookie) { size_t cookie_size = sizeof(*fa_cookie) + metadata->fa_cookie->cookie_len; fa_cookie = kmemdup(metadata->fa_cookie, cookie_size, GFP_ATOMIC); if (!fa_cookie) goto free_trap_name; hw_metadata->fa_cookie = fa_cookie; } hw_metadata->input_dev = metadata->input_dev; netdev_hold(hw_metadata->input_dev, &hw_metadata->dev_tracker, GFP_ATOMIC); return hw_metadata; free_trap_name: kfree(trap_name); free_trap_group: 
kfree(trap_group_name); free_hw_metadata: kfree(hw_metadata); return NULL; } static void net_dm_hw_metadata_free(struct devlink_trap_metadata *hw_metadata) { netdev_put(hw_metadata->input_dev, &hw_metadata->dev_tracker); kfree(hw_metadata->fa_cookie); kfree(hw_metadata->trap_name); kfree(hw_metadata->trap_group_name); kfree(hw_metadata); } static void net_dm_hw_packet_report(struct sk_buff *skb) { struct devlink_trap_metadata *hw_metadata; struct sk_buff *msg; size_t payload_len; int rc; if (skb->data > skb_mac_header(skb)) skb_push(skb, skb->data - skb_mac_header(skb)); else skb_pull(skb, skb_mac_header(skb) - skb->data); payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE); if (net_dm_trunc_len) payload_len = min_t(size_t, net_dm_trunc_len, payload_len); hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; msg = nlmsg_new(net_dm_hw_packet_report_size(payload_len, hw_metadata), GFP_KERNEL); if (!msg) goto out; rc = net_dm_hw_packet_report_fill(msg, skb, payload_len); if (rc) { nlmsg_free(msg); goto out; } genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL); out: net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata); consume_skb(skb); } static void net_dm_hw_packet_work(struct work_struct *work) { struct per_cpu_dm_data *hw_data; struct sk_buff_head list; struct sk_buff *skb; unsigned long flags; hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work); __skb_queue_head_init(&list); spin_lock_irqsave(&hw_data->drop_queue.lock, flags); skb_queue_splice_tail_init(&hw_data->drop_queue, &list); spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags); while ((skb = __skb_dequeue(&list))) net_dm_hw_packet_report(skb); } static void net_dm_hw_trap_packet_probe(void *ignore, const struct devlink *devlink, struct sk_buff *skb, const struct devlink_trap_metadata *metadata) { struct devlink_trap_metadata *n_hw_metadata; ktime_t tstamp = ktime_get_real(); struct per_cpu_dm_data *hw_data; struct sk_buff *nskb; unsigned long flags; if (metadata->trap_type == DEVLINK_TRAP_TYPE_CONTROL) return; if (!skb_mac_header_was_set(skb)) return; nskb = skb_clone(skb, GFP_ATOMIC); if (!nskb) return; n_hw_metadata = net_dm_hw_metadata_copy(metadata); if (!n_hw_metadata) goto free; NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata; nskb->tstamp = tstamp; hw_data = this_cpu_ptr(&dm_hw_cpu_data); spin_lock_irqsave(&hw_data->drop_queue.lock, flags); if (skb_queue_len(&hw_data->drop_queue) < net_dm_queue_len) __skb_queue_tail(&hw_data->drop_queue, nskb); else goto unlock_free; spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags); schedule_work(&hw_data->dm_alert_work); return; unlock_free: spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags); u64_stats_update_begin(&hw_data->stats.syncp); u64_stats_inc(&hw_data->stats.dropped); u64_stats_update_end(&hw_data->stats.syncp); net_dm_hw_metadata_free(n_hw_metadata); free: consume_skb(nskb); } static const struct net_dm_alert_ops net_dm_alert_packet_ops = { .kfree_skb_probe = net_dm_packet_trace_kfree_skb_hit, .napi_poll_probe = net_dm_packet_trace_napi_poll_hit, .work_item_func = net_dm_packet_work, .hw_work_item_func = net_dm_hw_packet_work, .hw_trap_probe = net_dm_hw_trap_packet_probe, }; static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = { [NET_DM_ALERT_MODE_SUMMARY] = &net_dm_alert_summary_ops, [NET_DM_ALERT_MODE_PACKET] = &net_dm_alert_packet_ops, }; #if IS_ENABLED(CONFIG_NET_DEVLINK) static int net_dm_hw_probe_register(const struct net_dm_alert_ops *ops) { return register_trace_devlink_trap_report(ops->hw_trap_probe, 
NULL); } static void net_dm_hw_probe_unregister(const struct net_dm_alert_ops *ops) { unregister_trace_devlink_trap_report(ops->hw_trap_probe, NULL); tracepoint_synchronize_unregister(); } #else static int net_dm_hw_probe_register(const struct net_dm_alert_ops *ops) { return -EOPNOTSUPP; } static void net_dm_hw_probe_unregister(const struct net_dm_alert_ops *ops) { } #endif static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack) { const struct net_dm_alert_ops *ops; int cpu, rc; if (monitor_hw) { NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled"); return -EAGAIN; } ops = net_dm_alert_ops_arr[net_dm_alert_mode]; if (!try_module_get(THIS_MODULE)) { NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module"); return -ENODEV; } for_each_possible_cpu(cpu) { struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); struct net_dm_hw_entries *hw_entries; INIT_WORK(&hw_data->dm_alert_work, ops->hw_work_item_func); timer_setup(&hw_data->send_timer, sched_send_work, 0); hw_entries = net_dm_hw_reset_per_cpu_data(hw_data); kfree(hw_entries); } rc = net_dm_hw_probe_register(ops); if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to devlink_trap_probe() tracepoint"); goto err_module_put; } monitor_hw = true; return 0; err_module_put: for_each_possible_cpu(cpu) { struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); struct sk_buff *skb; del_timer_sync(&hw_data->send_timer); cancel_work_sync(&hw_data->dm_alert_work); while ((skb = __skb_dequeue(&hw_data->drop_queue))) { struct devlink_trap_metadata *hw_metadata; hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; net_dm_hw_metadata_free(hw_metadata); consume_skb(skb); } } module_put(THIS_MODULE); return rc; } static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack) { const struct net_dm_alert_ops *ops; int cpu; if (!monitor_hw) { NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled"); return; } ops = net_dm_alert_ops_arr[net_dm_alert_mode]; monitor_hw = false; net_dm_hw_probe_unregister(ops); for_each_possible_cpu(cpu) { struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); struct sk_buff *skb; del_timer_sync(&hw_data->send_timer); cancel_work_sync(&hw_data->dm_alert_work); while ((skb = __skb_dequeue(&hw_data->drop_queue))) { struct devlink_trap_metadata *hw_metadata; hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; net_dm_hw_metadata_free(hw_metadata); consume_skb(skb); } } module_put(THIS_MODULE); } static int net_dm_trace_on_set(struct netlink_ext_ack *extack) { const struct net_dm_alert_ops *ops; int cpu, rc; ops = net_dm_alert_ops_arr[net_dm_alert_mode]; if (!try_module_get(THIS_MODULE)) { NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module"); return -ENODEV; } for_each_possible_cpu(cpu) { struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); struct sk_buff *skb; INIT_WORK(&data->dm_alert_work, ops->work_item_func); timer_setup(&data->send_timer, sched_send_work, 0); /* Allocate a new per-CPU skb for the summary alert message and * free the old one which might contain stale data from * previous tracing. 
*/ skb = reset_per_cpu_data(data); consume_skb(skb); } rc = register_trace_kfree_skb(ops->kfree_skb_probe, NULL); if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to kfree_skb() tracepoint"); goto err_module_put; } rc = register_trace_napi_poll(ops->napi_poll_probe, NULL); if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to napi_poll() tracepoint"); goto err_unregister_trace; } return 0; err_unregister_trace: unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL); err_module_put: for_each_possible_cpu(cpu) { struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); struct sk_buff *skb; del_timer_sync(&data->send_timer); cancel_work_sync(&data->dm_alert_work); while ((skb = __skb_dequeue(&data->drop_queue))) consume_skb(skb); } module_put(THIS_MODULE); return rc; } static void net_dm_trace_off_set(void) { const struct net_dm_alert_ops *ops; int cpu; ops = net_dm_alert_ops_arr[net_dm_alert_mode]; unregister_trace_napi_poll(ops->napi_poll_probe, NULL); unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL); tracepoint_synchronize_unregister(); /* Make sure we do not send notifications to user space after request * to stop tracing returns. */ for_each_possible_cpu(cpu) { struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); struct sk_buff *skb; del_timer_sync(&data->send_timer); cancel_work_sync(&data->dm_alert_work); while ((skb = __skb_dequeue(&data->drop_queue))) consume_skb(skb); } module_put(THIS_MODULE); } static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack) { int rc = 0; if (state == trace_state) { NL_SET_ERR_MSG_MOD(extack, "Trace state already set to requested state"); return -EAGAIN; } switch (state) { case TRACE_ON: rc = net_dm_trace_on_set(extack); break; case TRACE_OFF: net_dm_trace_off_set(); break; default: rc = 1; break; } if (!rc) trace_state = state; else rc = -EINPROGRESS; return rc; } static bool net_dm_is_monitoring(void) { return trace_state == TRACE_ON || monitor_hw; } static int net_dm_alert_mode_get_from_info(struct genl_info *info, enum net_dm_alert_mode *p_alert_mode) { u8 val; val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]); switch (val) { case NET_DM_ALERT_MODE_SUMMARY: case NET_DM_ALERT_MODE_PACKET: *p_alert_mode = val; break; default: return -EINVAL; } return 0; } static int net_dm_alert_mode_set(struct genl_info *info) { struct netlink_ext_ack *extack = info->extack; enum net_dm_alert_mode alert_mode; int rc; if (!info->attrs[NET_DM_ATTR_ALERT_MODE]) return 0; rc = net_dm_alert_mode_get_from_info(info, &alert_mode); if (rc) { NL_SET_ERR_MSG_MOD(extack, "Invalid alert mode"); return -EINVAL; } net_dm_alert_mode = alert_mode; return 0; } static void net_dm_trunc_len_set(struct genl_info *info) { if (!info->attrs[NET_DM_ATTR_TRUNC_LEN]) return; net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]); } static void net_dm_queue_len_set(struct genl_info *info) { if (!info->attrs[NET_DM_ATTR_QUEUE_LEN]) return; net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]); } static int net_dm_cmd_config(struct sk_buff *skb, struct genl_info *info) { struct netlink_ext_ack *extack = info->extack; int rc; if (net_dm_is_monitoring()) { NL_SET_ERR_MSG_MOD(extack, "Cannot configure drop monitor during monitoring"); return -EBUSY; } rc = net_dm_alert_mode_set(info); if (rc) return rc; net_dm_trunc_len_set(info); net_dm_queue_len_set(info); return 0; } static int net_dm_monitor_start(bool set_sw, bool set_hw, struct netlink_ext_ack *extack) { bool sw_set = false; int rc; if (set_sw) { rc = 
set_all_monitor_traces(TRACE_ON, extack); if (rc) return rc; sw_set = true; } if (set_hw) { rc = net_dm_hw_monitor_start(extack); if (rc) goto err_monitor_hw; } return 0; err_monitor_hw: if (sw_set) set_all_monitor_traces(TRACE_OFF, extack); return rc; } static void net_dm_monitor_stop(bool set_sw, bool set_hw, struct netlink_ext_ack *extack) { if (set_hw) net_dm_hw_monitor_stop(extack); if (set_sw) set_all_monitor_traces(TRACE_OFF, extack); } static int net_dm_cmd_trace(struct sk_buff *skb, struct genl_info *info) { bool set_sw = !!info->attrs[NET_DM_ATTR_SW_DROPS]; bool set_hw = !!info->attrs[NET_DM_ATTR_HW_DROPS]; struct netlink_ext_ack *extack = info->extack; /* To maintain backward compatibility, we start / stop monitoring of * software drops if no flag is specified. */ if (!set_sw && !set_hw) set_sw = true; switch (info->genlhdr->cmd) { case NET_DM_CMD_START: return net_dm_monitor_start(set_sw, set_hw, extack); case NET_DM_CMD_STOP: net_dm_monitor_stop(set_sw, set_hw, extack); return 0; } return -EOPNOTSUPP; } static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info) { void *hdr; hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &net_drop_monitor_family, 0, NET_DM_CMD_CONFIG_NEW); if (!hdr) return -EMSGSIZE; if (nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, net_dm_alert_mode)) goto nla_put_failure; if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len)) goto nla_put_failure; if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len)) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; int rc; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; rc = net_dm_config_fill(msg, info); if (rc) goto free_msg; return genlmsg_reply(msg, info); free_msg: nlmsg_free(msg); return rc; } static void net_dm_stats_read(struct net_dm_stats *stats) { int cpu; memset(stats, 0, sizeof(*stats)); for_each_possible_cpu(cpu) { struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); struct net_dm_stats *cpu_stats = &data->stats; unsigned int start; u64 dropped; do { start = u64_stats_fetch_begin(&cpu_stats->syncp); dropped = u64_stats_read(&cpu_stats->dropped); } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); u64_stats_add(&stats->dropped, dropped); } } static int net_dm_stats_put(struct sk_buff *msg) { struct net_dm_stats stats; struct nlattr *attr; net_dm_stats_read(&stats); attr = nla_nest_start(msg, NET_DM_ATTR_STATS); if (!attr) return -EMSGSIZE; if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED, u64_stats_read(&stats.dropped), NET_DM_ATTR_PAD)) goto nla_put_failure; nla_nest_end(msg, attr); return 0; nla_put_failure: nla_nest_cancel(msg, attr); return -EMSGSIZE; } static void net_dm_hw_stats_read(struct net_dm_stats *stats) { int cpu; memset(stats, 0, sizeof(*stats)); for_each_possible_cpu(cpu) { struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); struct net_dm_stats *cpu_stats = &hw_data->stats; unsigned int start; u64 dropped; do { start = u64_stats_fetch_begin(&cpu_stats->syncp); dropped = u64_stats_read(&cpu_stats->dropped); } while (u64_stats_fetch_retry(&cpu_stats->syncp, start)); u64_stats_add(&stats->dropped, dropped); } } static int net_dm_hw_stats_put(struct sk_buff *msg) { struct net_dm_stats stats; struct nlattr *attr; net_dm_hw_stats_read(&stats); attr = nla_nest_start(msg, NET_DM_ATTR_HW_STATS); if (!attr) return -EMSGSIZE; if 
(nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED, u64_stats_read(&stats.dropped), NET_DM_ATTR_PAD)) goto nla_put_failure; nla_nest_end(msg, attr); return 0; nla_put_failure: nla_nest_cancel(msg, attr); return -EMSGSIZE; } static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info) { void *hdr; int rc; hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW); if (!hdr) return -EMSGSIZE; rc = net_dm_stats_put(msg); if (rc) goto nla_put_failure; rc = net_dm_hw_stats_put(msg); if (rc) goto nla_put_failure; genlmsg_end(msg, hdr); return 0; nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; int rc; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; rc = net_dm_stats_fill(msg, info); if (rc) goto free_msg; return genlmsg_reply(msg, info); free_msg: nlmsg_free(msg); return rc; } static int dropmon_net_event(struct notifier_block *ev_block, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct dm_hw_stat_delta *stat; switch (event) { case NETDEV_REGISTER: if (WARN_ON_ONCE(rtnl_dereference(dev->dm_private))) break; stat = kzalloc(sizeof(*stat), GFP_KERNEL); if (!stat) break; stat->last_rx = jiffies; rcu_assign_pointer(dev->dm_private, stat); break; case NETDEV_UNREGISTER: stat = rtnl_dereference(dev->dm_private); if (stat) { rcu_assign_pointer(dev->dm_private, NULL); kfree_rcu(stat, rcu); } break; } return NOTIFY_DONE; } static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = { [NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 }, [NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 }, [NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 }, [NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 }, [NET_DM_ATTR_SW_DROPS] = {. type = NLA_FLAG }, [NET_DM_ATTR_HW_DROPS] = {. 
type = NLA_FLAG }, }; static const struct genl_small_ops dropmon_ops[] = { { .cmd = NET_DM_CMD_CONFIG, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = net_dm_cmd_config, .flags = GENL_ADMIN_PERM, }, { .cmd = NET_DM_CMD_START, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = net_dm_cmd_trace, .flags = GENL_ADMIN_PERM, }, { .cmd = NET_DM_CMD_STOP, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = net_dm_cmd_trace, .flags = GENL_ADMIN_PERM, }, { .cmd = NET_DM_CMD_CONFIG_GET, .doit = net_dm_cmd_config_get, }, { .cmd = NET_DM_CMD_STATS_GET, .doit = net_dm_cmd_stats_get, }, }; static int net_dm_nl_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { mutex_lock(&net_dm_mutex); return 0; } static void net_dm_nl_post_doit(const struct genl_split_ops *ops, struct sk_buff *skb, struct genl_info *info) { mutex_unlock(&net_dm_mutex); } static struct genl_family net_drop_monitor_family __ro_after_init = { .hdrsize = 0, .name = "NET_DM", .version = 2, .maxattr = NET_DM_ATTR_MAX, .policy = net_dm_nl_policy, .pre_doit = net_dm_nl_pre_doit, .post_doit = net_dm_nl_post_doit, .module = THIS_MODULE, .small_ops = dropmon_ops, .n_small_ops = ARRAY_SIZE(dropmon_ops), .resv_start_op = NET_DM_CMD_STATS_GET + 1, .mcgrps = dropmon_mcgrps, .n_mcgrps = ARRAY_SIZE(dropmon_mcgrps), }; static struct notifier_block dropmon_net_notifier = { .notifier_call = dropmon_net_event }; static void __net_dm_cpu_data_init(struct per_cpu_dm_data *data) { raw_spin_lock_init(&data->lock); skb_queue_head_init(&data->drop_queue); u64_stats_init(&data->stats.syncp); } static void __net_dm_cpu_data_fini(struct per_cpu_dm_data *data) { WARN_ON(!skb_queue_empty(&data->drop_queue)); } static void net_dm_cpu_data_init(int cpu) { struct per_cpu_dm_data *data; data = &per_cpu(dm_cpu_data, cpu); __net_dm_cpu_data_init(data); } static void net_dm_cpu_data_fini(int cpu) { struct per_cpu_dm_data *data; data = &per_cpu(dm_cpu_data, cpu); /* At this point, we should have exclusive access * to this struct and can free the skb inside it. 
*/ consume_skb(data->skb); __net_dm_cpu_data_fini(data); } static void net_dm_hw_cpu_data_init(int cpu) { struct per_cpu_dm_data *hw_data; hw_data = &per_cpu(dm_hw_cpu_data, cpu); __net_dm_cpu_data_init(hw_data); } static void net_dm_hw_cpu_data_fini(int cpu) { struct per_cpu_dm_data *hw_data; hw_data = &per_cpu(dm_hw_cpu_data, cpu); kfree(hw_data->hw_entries); __net_dm_cpu_data_fini(hw_data); } static int __init init_net_drop_monitor(void) { int cpu, rc; pr_info("Initializing network drop monitor service\n"); if (sizeof(void *) > 8) { pr_err("Unable to store program counters on this arch, Drop monitor failed\n"); return -ENOSPC; } rc = genl_register_family(&net_drop_monitor_family); if (rc) { pr_err("Could not create drop monitor netlink family\n"); return rc; } WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT); rc = register_netdevice_notifier(&dropmon_net_notifier); if (rc < 0) { pr_crit("Failed to register netdevice notifier\n"); goto out_unreg; } rc = 0; for_each_possible_cpu(cpu) { net_dm_cpu_data_init(cpu); net_dm_hw_cpu_data_init(cpu); } goto out; out_unreg: genl_unregister_family(&net_drop_monitor_family); out: return rc; } static void exit_net_drop_monitor(void) { int cpu; BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); /* * Because of the module_get/put we do in the trace state change path * we are guaranteed not to have any current users when we get here */ for_each_possible_cpu(cpu) { net_dm_hw_cpu_data_fini(cpu); net_dm_cpu_data_fini(cpu); } BUG_ON(genl_unregister_family(&net_drop_monitor_family)); } module_init(init_net_drop_monitor); module_exit(exit_net_drop_monitor); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); MODULE_ALIAS_GENL_FAMILY("NET_DM"); MODULE_DESCRIPTION("Monitoring code for network dropped packet alerts");
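The NET_DM generic netlink family registered above (net_dm_cmd_trace() handling NET_DM_CMD_START/STOP) is normally driven from user space. Below is a minimal, hypothetical client sketch, assuming libnl-3 and the uapi <linux/net_dropmon.h> definitions; it is illustrative only and omits error handling.

/* Hypothetical user-space client for the "NET_DM" family above (libnl-3).
 * Sends NET_DM_CMD_START with both drop-type flags so that the software
 * tracepoint path and the devlink trap path are armed. Needs CAP_NET_ADMIN
 * because the command is marked GENL_ADMIN_PERM.
 */
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/net_dropmon.h>

static int net_dm_start(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	if (!sk)
		return -1;
	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "NET_DM");

	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NET_DM_CMD_START, 2 /* family version, see above */);
	nla_put_flag(msg, NET_DM_ATTR_SW_DROPS);
	nla_put_flag(msg, NET_DM_ATTR_HW_DROPS);

	err = nl_send_auto(sk, msg);	/* kernel side runs net_dm_cmd_trace() */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? -1 : 0;
}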
/* * Copyright (C) 2016 Red Hat * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or
substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rob Clark <robdclark@gmail.com> */ #ifndef DRM_PRINT_H_ #define DRM_PRINT_H_ #include <linux/compiler.h> #include <linux/printk.h> #include <linux/device.h> #include <linux/dynamic_debug.h> #include <drm/drm.h> struct debugfs_regset32; struct drm_device; struct seq_file; /* Do *not* use outside of drm_print.[ch]! */ extern unsigned long __drm_debug; /** * DOC: print * * A simple wrapper for dev_printk(), seq_printf(), etc. Allows same * debug code to be used for both debugfs and printk logging. * * For example:: * * void log_some_info(struct drm_printer *p) * { * drm_printf(p, "foo=%d\n", foo); * drm_printf(p, "bar=%d\n", bar); * } * * #ifdef CONFIG_DEBUG_FS * void debugfs_show(struct seq_file *f) * { * struct drm_printer p = drm_seq_file_printer(f); * log_some_info(&p); * } * #endif * * void some_other_function(...) * { * struct drm_printer p = drm_info_printer(drm->dev); * log_some_info(&p); * } */ /** * enum drm_debug_category - The DRM debug categories * * Each of the DRM debug logging macros use a specific category, and the logging * is filtered by the drm.debug module parameter. This enum specifies the values * for the interface. * * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except * DRM_DEBUG() logs to DRM_UT_CORE. * * Enabling verbose debug messages is done through the drm.debug parameter, each * category being enabled by a bit: * * - drm.debug=0x1 will enable CORE messages * - drm.debug=0x2 will enable DRIVER messages * - drm.debug=0x3 will enable CORE and DRIVER messages * - ... * - drm.debug=0x1ff will enable all messages * * An interesting feature is that it's possible to enable verbose logging at * run-time by echoing the debug value in its sysfs node:: * * # echo 0xf > /sys/module/drm/parameters/debug * */ enum drm_debug_category { /* These names must match those in DYNAMIC_DEBUG_CLASSBITS */ /** * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, * drm_memory.c, ... */ DRM_UT_CORE, /** * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915, * radeon, ... macro. */ DRM_UT_DRIVER, /** * @DRM_UT_KMS: Used in the modesetting code. */ DRM_UT_KMS, /** * @DRM_UT_PRIME: Used in the prime code. */ DRM_UT_PRIME, /** * @DRM_UT_ATOMIC: Used in the atomic code. */ DRM_UT_ATOMIC, /** * @DRM_UT_VBL: Used for verbose debug message in the vblank code. */ DRM_UT_VBL, /** * @DRM_UT_STATE: Used for verbose atomic state debugging. */ DRM_UT_STATE, /** * @DRM_UT_LEASE: Used in the lease code. */ DRM_UT_LEASE, /** * @DRM_UT_DP: Used in the DP code. */ DRM_UT_DP, /** * @DRM_UT_DRMRES: Used in the drm managed resources code. 
*/ DRM_UT_DRMRES }; static inline bool drm_debug_enabled_raw(enum drm_debug_category category) { return unlikely(__drm_debug & BIT(category)); } #define drm_debug_enabled_instrumented(category) \ ({ \ pr_debug("todo: is this frequent enough to optimize ?\n"); \ drm_debug_enabled_raw(category); \ }) #if defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) /* * the drm.debug API uses dyndbg, so each drm_*dbg macro/callsite gets * a descriptor, and only enabled callsites are reachable. They use * the private macro to avoid re-testing the enable-bit. */ #define __drm_debug_enabled(category) true #define drm_debug_enabled(category) drm_debug_enabled_instrumented(category) #else #define __drm_debug_enabled(category) drm_debug_enabled_raw(category) #define drm_debug_enabled(category) drm_debug_enabled_raw(category) #endif /** * struct drm_printer - drm output "stream" * * Do not use struct members directly. Use drm_printer_seq_file(), * drm_printer_info(), etc to initialize. And drm_printf() for output. */ struct drm_printer { /* private: */ void (*printfn)(struct drm_printer *p, struct va_format *vaf); void (*puts)(struct drm_printer *p, const char *str); void *arg; const void *origin; const char *prefix; struct { unsigned int series; unsigned int counter; } line; enum drm_debug_category category; }; void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf); void __drm_puts_coredump(struct drm_printer *p, const char *str); void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf); void __drm_puts_seq_file(struct drm_printer *p, const char *str); void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf); __printf(2, 3) void drm_printf(struct drm_printer *p, const char *f, ...); void drm_puts(struct drm_printer *p, const char *str); void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset); void drm_print_bits(struct drm_printer *p, unsigned long value, const char * const bits[], unsigned int nbits); void drm_print_hex_dump(struct drm_printer *p, const char *prefix, const u8 *buf, size_t len); __printf(2, 0) /** * drm_vprintf - print to a &drm_printer stream * @p: the &drm_printer * @fmt: format string * @va: the va_list */ static inline void drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va) { struct va_format vaf = { .fmt = fmt, .va = va }; p->printfn(p, &vaf); } /** * drm_printf_indent - Print to a &drm_printer stream with indentation * @printer: DRM printer * @indent: Tab indentation level (max 5) * @fmt: Format string */ #define drm_printf_indent(printer, indent, fmt, ...) 
\ drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) /** * struct drm_print_iterator - local struct used with drm_printer_coredump * @data: Pointer to the devcoredump output buffer, can be NULL if using * drm_printer_coredump to determine size of devcoredump * @start: The offset within the buffer to start writing * @remain: The number of bytes to write for this iteration */ struct drm_print_iterator { void *data; ssize_t start; ssize_t remain; /* private: */ ssize_t offset; }; /** * drm_coredump_printer - construct a &drm_printer that can output to a buffer * from the read function for devcoredump * @iter: A pointer to a struct drm_print_iterator for the read instance * * This wrapper extends drm_printf() to work with a dev_coredumpm() callback * function. The passed in drm_print_iterator struct contains the buffer * pointer, size and offset as passed in from devcoredump. * * For example:: * * void coredump_read(char *buffer, loff_t offset, size_t count, * void *data, size_t datalen) * { * struct drm_print_iterator iter; * struct drm_printer p; * * iter.data = buffer; * iter.start = offset; * iter.remain = count; * * p = drm_coredump_printer(&iter); * * drm_printf(p, "foo=%d\n", foo); * } * * void makecoredump(...) * { * ... * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL, * coredump_read, ...) * } * * The above example has a time complexity of O(N^2), where N is the size of the * devcoredump. This is acceptable for small devcoredumps but scales poorly for * larger ones. * * Another use case for drm_coredump_printer is to capture the devcoredump into * a saved buffer before the dev_coredump() callback. This involves two passes: * one to determine the size of the devcoredump and another to print it to a * buffer. Then, in dev_coredump(), copy from the saved buffer into the * devcoredump read buffer. * * For example:: * * char *devcoredump_saved_buffer; * * ssize_t __coredump_print(char *buffer, ssize_t count, ...) * { * struct drm_print_iterator iter; * struct drm_printer p; * * iter.data = buffer; * iter.start = 0; * iter.remain = count; * * p = drm_coredump_printer(&iter); * * drm_printf(p, "foo=%d\n", foo); * ... * return count - iter.remain; * } * * void coredump_print(...) * { * ssize_t count; * * count = __coredump_print(NULL, INT_MAX, ...); * devcoredump_saved_buffer = kvmalloc(count, GFP_KERNEL); * __coredump_print(devcoredump_saved_buffer, count, ...); * } * * void coredump_read(char *buffer, loff_t offset, size_t count, * void *data, size_t datalen) * { * ... * memcpy(buffer, devcoredump_saved_buffer + offset, count); * ... * } * * The above example has a time complexity of O(N*2), where N is the size of the * devcoredump. This scales better than the previous example for larger * devcoredumps. 
* * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_coredump_printer(struct drm_print_iterator *iter) { struct drm_printer p = { .printfn = __drm_printfn_coredump, .puts = __drm_puts_coredump, .arg = iter, }; /* Set the internal offset of the iterator to zero */ iter->offset = 0; return p; } /** * drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file * @f: the &struct seq_file to output to * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f) { struct drm_printer p = { .printfn = __drm_printfn_seq_file, .puts = __drm_puts_seq_file, .arg = f, }; return p; } /** * drm_info_printer - construct a &drm_printer that outputs to dev_printk() * @dev: the &struct device pointer * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_info_printer(struct device *dev) { struct drm_printer p = { .printfn = __drm_printfn_info, .arg = dev, }; return p; } /** * drm_dbg_printer - construct a &drm_printer for drm device specific output * @drm: the &struct drm_device pointer, or NULL * @category: the debug category to use * @prefix: debug output prefix, or NULL for no prefix * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_dbg_printer(struct drm_device *drm, enum drm_debug_category category, const char *prefix) { struct drm_printer p = { .printfn = __drm_printfn_dbg, .arg = drm, .origin = (const void *)_THIS_IP_, /* it's fine as we will be inlined */ .prefix = prefix, .category = category, }; return p; } /** * drm_err_printer - construct a &drm_printer that outputs to drm_err() * @drm: the &struct drm_device pointer * @prefix: debug output prefix, or NULL for no prefix * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_err_printer(struct drm_device *drm, const char *prefix) { struct drm_printer p = { .printfn = __drm_printfn_err, .arg = drm, .prefix = prefix }; return p; } /** * drm_line_printer - construct a &drm_printer that prefixes outputs with line numbers * @p: the &struct drm_printer which actually generates the output * @prefix: optional output prefix, or NULL for no prefix * @series: optional unique series identifier, or 0 to omit identifier in the output * * This printer can be used to increase the robustness of the captured output * to make sure we didn't lost any intermediate lines of the output. Helpful * while capturing some crash data. 
* * Example 1:: * * void crash_dump(struct drm_device *drm) * { * static unsigned int id; * struct drm_printer p = drm_err_printer(drm, "crash"); * struct drm_printer lp = drm_line_printer(&p, "dump", ++id); * * drm_printf(&lp, "foo"); * drm_printf(&lp, "bar"); * } * * Above code will print into the dmesg something like:: * * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.1: foo * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.2: bar * * Example 2:: * * void line_dump(struct device *dev) * { * struct drm_printer p = drm_info_printer(dev); * struct drm_printer lp = drm_line_printer(&p, NULL, 0); * * drm_printf(&lp, "foo"); * drm_printf(&lp, "bar"); * } * * Above code will print:: * * [ ] 0000:00:00.0: [drm] 1: foo * [ ] 0000:00:00.0: [drm] 2: bar * * RETURNS: * The &drm_printer object */ static inline struct drm_printer drm_line_printer(struct drm_printer *p, const char *prefix, unsigned int series) { struct drm_printer lp = { .printfn = __drm_printfn_line, .arg = p, .prefix = prefix, .line = { .series = series, }, }; return lp; } /* * struct device based logging * * Prefer drm_device based logging over device or printk based logging. */ __printf(3, 4) void drm_dev_printk(const struct device *dev, const char *level, const char *format, ...); struct _ddebug; __printf(4, 5) void __drm_dev_dbg(struct _ddebug *desc, const struct device *dev, enum drm_debug_category category, const char *format, ...); /** * DRM_DEV_ERROR() - Error output. * * NOTE: this is deprecated in favor of drm_err() or dev_err(). * * @dev: device pointer * @fmt: printf() like format string. */ #define DRM_DEV_ERROR(dev, fmt, ...) \ drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__) /** * DRM_DEV_ERROR_RATELIMITED() - Rate limited error output. * * NOTE: this is deprecated in favor of drm_err_ratelimited() or * dev_err_ratelimited(). * * @dev: device pointer * @fmt: printf() like format string. * * Like DRM_ERROR() but won't flood the log. */ #define DRM_DEV_ERROR_RATELIMITED(dev, fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(_rs, \ DEFAULT_RATELIMIT_INTERVAL, \ DEFAULT_RATELIMIT_BURST); \ \ if (__ratelimit(&_rs)) \ DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \ }) /* NOTE: this is deprecated in favor of drm_info() or dev_info(). */ #define DRM_DEV_INFO(dev, fmt, ...) \ drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_info_once() or dev_info_once(). */ #define DRM_DEV_INFO_ONCE(dev, fmt, ...) \ ({ \ static bool __print_once __read_mostly; \ if (!__print_once) { \ __print_once = true; \ DRM_DEV_INFO(dev, fmt, ##__VA_ARGS__); \ } \ }) #if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) #define drm_dev_dbg(dev, cat, fmt, ...) \ __drm_dev_dbg(NULL, dev, cat, fmt, ##__VA_ARGS__) #else #define drm_dev_dbg(dev, cat, fmt, ...) \ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \ dev, cat, fmt, ##__VA_ARGS__) #endif /** * DRM_DEV_DEBUG() - Debug output for generic drm code * * NOTE: this is deprecated in favor of drm_dbg_core(). * * @dev: device pointer * @fmt: printf() like format string. */ #define DRM_DEV_DEBUG(dev, fmt, ...) \ drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__) /** * DRM_DEV_DEBUG_DRIVER() - Debug output for vendor specific part of the driver * * NOTE: this is deprecated in favor of drm_dbg() or dev_dbg(). * * @dev: device pointer * @fmt: printf() like format string. */ #define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) 
\ drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) /** * DRM_DEV_DEBUG_KMS() - Debug output for modesetting code * * NOTE: this is deprecated in favor of drm_dbg_kms(). * * @dev: device pointer * @fmt: printf() like format string. */ #define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \ drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__) /* * struct drm_device based logging * * Prefer drm_device based logging over device or prink based logging. */ /* Helper for struct drm_device based logging. */ #define __drm_printk(drm, level, type, fmt, ...) \ dev_##level##type((drm) ? (drm)->dev : NULL, "[drm] " fmt, ##__VA_ARGS__) #define drm_info(drm, fmt, ...) \ __drm_printk((drm), info,, fmt, ##__VA_ARGS__) #define drm_notice(drm, fmt, ...) \ __drm_printk((drm), notice,, fmt, ##__VA_ARGS__) #define drm_warn(drm, fmt, ...) \ __drm_printk((drm), warn,, fmt, ##__VA_ARGS__) #define drm_err(drm, fmt, ...) \ __drm_printk((drm), err,, "*ERROR* " fmt, ##__VA_ARGS__) #define drm_info_once(drm, fmt, ...) \ __drm_printk((drm), info, _once, fmt, ##__VA_ARGS__) #define drm_notice_once(drm, fmt, ...) \ __drm_printk((drm), notice, _once, fmt, ##__VA_ARGS__) #define drm_warn_once(drm, fmt, ...) \ __drm_printk((drm), warn, _once, fmt, ##__VA_ARGS__) #define drm_err_once(drm, fmt, ...) \ __drm_printk((drm), err, _once, "*ERROR* " fmt, ##__VA_ARGS__) #define drm_err_ratelimited(drm, fmt, ...) \ __drm_printk((drm), err, _ratelimited, "*ERROR* " fmt, ##__VA_ARGS__) #define drm_dbg_core(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__) #define drm_dbg_driver(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__) #define drm_dbg_kms(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__) #define drm_dbg_prime(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__) #define drm_dbg_atomic(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) #define drm_dbg_vbl(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__) #define drm_dbg_state(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__) #define drm_dbg_lease(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__) #define drm_dbg_dp(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__) #define drm_dbg_drmres(drm, fmt, ...) \ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__) #define drm_dbg(drm, fmt, ...) drm_dbg_driver(drm, fmt, ##__VA_ARGS__) /* * printk based logging * * Prefer drm_device based logging over device or prink based logging. */ __printf(1, 2) void __drm_err(const char *format, ...); #if !defined(CONFIG_DRM_USE_DYNAMIC_DEBUG) #define __drm_dbg(cat, fmt, ...) __drm_dev_dbg(NULL, NULL, cat, fmt, ##__VA_ARGS__) #else #define __drm_dbg(cat, fmt, ...) \ _dynamic_func_call_cls(cat, fmt, __drm_dev_dbg, \ NULL, cat, fmt, ##__VA_ARGS__) #endif /* Macros to make printk easier */ #define _DRM_PRINTK(once, level, fmt, ...) \ printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_info(). */ #define DRM_INFO(fmt, ...) \ _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_notice(). */ #define DRM_NOTE(fmt, ...) \ _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_warn(). */ #define DRM_WARN(fmt, ...) 
\ _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_info_once(). */ #define DRM_INFO_ONCE(fmt, ...) \ _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_notice_once(). */ #define DRM_NOTE_ONCE(fmt, ...) \ _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_warn_once(). */ #define DRM_WARN_ONCE(fmt, ...) \ _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_err(). */ #define DRM_ERROR(fmt, ...) \ __drm_err(fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of pr_err_ratelimited(). */ #define DRM_ERROR_RATELIMITED(fmt, ...) \ DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_core(NULL, ...). */ #define DRM_DEBUG(fmt, ...) \ __drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg(NULL, ...). */ #define DRM_DEBUG_DRIVER(fmt, ...) \ __drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_kms(NULL, ...). */ #define DRM_DEBUG_KMS(fmt, ...) \ __drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_prime(NULL, ...). */ #define DRM_DEBUG_PRIME(fmt, ...) \ __drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_atomic(NULL, ...). */ #define DRM_DEBUG_ATOMIC(fmt, ...) \ __drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_vbl(NULL, ...). */ #define DRM_DEBUG_VBL(fmt, ...) \ __drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_lease(NULL, ...). */ #define DRM_DEBUG_LEASE(fmt, ...) \ __drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__) /* NOTE: this is deprecated in favor of drm_dbg_dp(NULL, ...). */ #define DRM_DEBUG_DP(fmt, ...) \ __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__) #define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \ ({ \ static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\ const struct drm_device *drm_ = (drm); \ \ if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \ drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \ }) #define drm_dbg_ratelimited(drm, fmt, ...) \ __DRM_DEFINE_DBG_RATELIMITED(DRIVER, drm, fmt, ## __VA_ARGS__) #define drm_dbg_kms_ratelimited(drm, fmt, ...) \ __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__) /* * struct drm_device based WARNs * * drm_WARN*() acts like WARN*(), but with the key difference of * using device specific information so that we know from which device * warning is originating from. * * Prefer drm_device based drm_WARN* over regular WARN* */ /* Helper for struct drm_device based WARNs */ #define drm_WARN(drm, condition, format, arg...) \ WARN(condition, "%s %s: [drm] " format, \ dev_driver_string((drm)->dev), \ dev_name((drm)->dev), ## arg) #define drm_WARN_ONCE(drm, condition, format, arg...) \ WARN_ONCE(condition, "%s %s: [drm] " format, \ dev_driver_string((drm)->dev), \ dev_name((drm)->dev), ## arg) #define drm_WARN_ON(drm, x) \ drm_WARN((drm), (x), "%s", \ "drm_WARN_ON(" __stringify(x) ")") #define drm_WARN_ON_ONCE(drm, x) \ drm_WARN_ONCE((drm), (x), "%s", \ "drm_WARN_ON_ONCE(" __stringify(x) ")") #endif /* DRM_PRINT_H_ */
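As a concrete illustration of the struct drm_printer indirection documented in this header, here is a short, hypothetical driver-side sketch (foo_device and its fields are made up): the same dump helper feeds a debugfs seq_file and a drm.debug-gated dmesg dump.

/* Hypothetical example only: one dump routine, two output targets. */
struct foo_device {
	struct drm_device drm;
	unsigned int num_rings;
	struct { u32 head, tail; } ring[4];
};

static void foo_dump_rings(struct foo_device *foo, struct drm_printer *p)
{
	unsigned int i;

	drm_printf(p, "rings=%u\n", foo->num_rings);
	for (i = 0; i < foo->num_rings; i++)
		drm_printf_indent(p, 1, "ring[%u]: head=0x%08x tail=0x%08x\n",
				  i, foo->ring[i].head, foo->ring[i].tail);
}

/* debugfs path: output lands in the seq_file */
static int foo_rings_show(struct seq_file *m, void *unused)
{
	struct drm_printer p = drm_seq_file_printer(m);

	foo_dump_rings(m->private, &p);
	return 0;
}

/* dmesg path: output is gated by drm.debug & DRM_UT_DRIVER */
static void foo_dump_rings_to_dmesg(struct foo_device *foo)
{
	struct drm_printer p = drm_dbg_printer(&foo->drm, DRM_UT_DRIVER, "rings");

	foo_dump_rings(foo, &p);
}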
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007, 2008, 2009 Siemens AG */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <net/cfg802154.h> #include <net/rtnetlink.h> #include "ieee802154.h" #include "nl802154.h" #include "sysfs.h" #include "core.h" /* name for sysfs, %d is appended */ #define PHY_NAME "phy" /* RCU-protected (and RTNL for writers) */ LIST_HEAD(cfg802154_rdev_list); int cfg802154_rdev_list_generation; struct wpan_phy *wpan_phy_find(const char *str) { struct device *dev; if (WARN_ON(!str)) return NULL; dev = class_find_device_by_name(&wpan_phy_class, str); if (!dev) return NULL; return container_of(dev, struct wpan_phy, dev); } EXPORT_SYMBOL(wpan_phy_find); struct wpan_phy_iter_data { int (*fn)(struct wpan_phy *phy, void *data); void *data; }; static int wpan_phy_iter(struct device *dev, void *_data) { struct wpan_phy_iter_data *wpid = _data; struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); return wpid->fn(phy, wpid->data); } int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data), void *data) { struct wpan_phy_iter_data wpid = { .fn = fn, .data = data, }; return class_for_each_device(&wpan_phy_class, NULL, &wpid, wpan_phy_iter); } EXPORT_SYMBOL(wpan_phy_for_each); struct cfg802154_registered_device * cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx) { struct cfg802154_registered_device *result = NULL, *rdev; ASSERT_RTNL(); list_for_each_entry(rdev, &cfg802154_rdev_list, list) { if (rdev->wpan_phy_idx == wpan_phy_idx) { result = rdev; break; } } return result; } struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx) { struct cfg802154_registered_device *rdev; ASSERT_RTNL(); rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx); if (!rdev) return NULL; return &rdev->wpan_phy; } struct wpan_phy * wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size) { static atomic_t wpan_phy_counter =
ATOMIC_INIT(0); struct cfg802154_registered_device *rdev; size_t alloc_size; alloc_size = sizeof(*rdev) + priv_size; rdev = kzalloc(alloc_size, GFP_KERNEL); if (!rdev) return NULL; rdev->ops = ops; rdev->wpan_phy_idx = atomic_inc_return(&wpan_phy_counter); if (unlikely(rdev->wpan_phy_idx < 0)) { /* ugh, wrapped! */ atomic_dec(&wpan_phy_counter); kfree(rdev); return NULL; } /* atomic_inc_return makes it start at 1, make it start at 0 */ rdev->wpan_phy_idx--; INIT_LIST_HEAD(&rdev->wpan_dev_list); device_initialize(&rdev->wpan_phy.dev); dev_set_name(&rdev->wpan_phy.dev, PHY_NAME "%d", rdev->wpan_phy_idx); rdev->wpan_phy.dev.class = &wpan_phy_class; rdev->wpan_phy.dev.platform_data = rdev; wpan_phy_net_set(&rdev->wpan_phy, &init_net); init_waitqueue_head(&rdev->dev_wait); init_waitqueue_head(&rdev->wpan_phy.sync_txq); spin_lock_init(&rdev->wpan_phy.queue_lock); return &rdev->wpan_phy; } EXPORT_SYMBOL(wpan_phy_new); int wpan_phy_register(struct wpan_phy *phy) { struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy); int ret; rtnl_lock(); ret = device_add(&phy->dev); if (ret) { rtnl_unlock(); return ret; } list_add_rcu(&rdev->list, &cfg802154_rdev_list); cfg802154_rdev_list_generation++; /* TODO phy registered lock */ rtnl_unlock(); /* TODO nl802154 phy notify */ return 0; } EXPORT_SYMBOL(wpan_phy_register); void wpan_phy_unregister(struct wpan_phy *phy) { struct cfg802154_registered_device *rdev = wpan_phy_to_rdev(phy); wait_event(rdev->dev_wait, ({ int __count; rtnl_lock(); __count = rdev->opencount; rtnl_unlock(); __count == 0; })); rtnl_lock(); /* TODO nl802154 phy notify */ /* TODO phy registered lock */ WARN_ON(!list_empty(&rdev->wpan_dev_list)); /* First remove the hardware from everywhere, this makes * it impossible to find from userspace. 
*/ list_del_rcu(&rdev->list); synchronize_rcu(); cfg802154_rdev_list_generation++; device_del(&phy->dev); rtnl_unlock(); } EXPORT_SYMBOL(wpan_phy_unregister); void wpan_phy_free(struct wpan_phy *phy) { put_device(&phy->dev); } EXPORT_SYMBOL(wpan_phy_free); static void cfg802154_free_peer_structures(struct wpan_dev *wpan_dev) { struct ieee802154_pan_device *child, *tmp; mutex_lock(&wpan_dev->association_lock); kfree(wpan_dev->parent); wpan_dev->parent = NULL; list_for_each_entry_safe(child, tmp, &wpan_dev->children, node) { list_del(&child->node); kfree(child); } wpan_dev->nchildren = 0; mutex_unlock(&wpan_dev->association_lock); } int cfg802154_switch_netns(struct cfg802154_registered_device *rdev, struct net *net) { struct wpan_dev *wpan_dev; int err = 0; list_for_each_entry(wpan_dev, &rdev->wpan_dev_list, list) { if (!wpan_dev->netdev) continue; wpan_dev->netdev->netns_local = false; err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d"); if (err) break; wpan_dev->netdev->netns_local = true; } if (err) { /* failed -- clean up to old netns */ net = wpan_phy_net(&rdev->wpan_phy); list_for_each_entry_continue_reverse(wpan_dev, &rdev->wpan_dev_list, list) { if (!wpan_dev->netdev) continue; wpan_dev->netdev->netns_local = false; err = dev_change_net_namespace(wpan_dev->netdev, net, "wpan%d"); WARN_ON(err); wpan_dev->netdev->netns_local = true; } return err; } wpan_phy_net_set(&rdev->wpan_phy, net); err = device_rename(&rdev->wpan_phy.dev, dev_name(&rdev->wpan_phy.dev)); WARN_ON(err); return 0; } void cfg802154_dev_free(struct cfg802154_registered_device *rdev) { kfree(rdev); } static void cfg802154_update_iface_num(struct cfg802154_registered_device *rdev, int iftype, int num) { ASSERT_RTNL(); rdev->num_running_ifaces += num; } static int cfg802154_netdev_notifier_call(struct notifier_block *nb, unsigned long state, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct wpan_dev *wpan_dev = dev->ieee802154_ptr; struct cfg802154_registered_device *rdev; if (!wpan_dev) return NOTIFY_DONE; rdev = wpan_phy_to_rdev(wpan_dev->wpan_phy); /* TODO WARN_ON unspec type */ switch (state) { /* TODO NETDEV_DEVTYPE */ case NETDEV_REGISTER: dev->netns_local = true; wpan_dev->identifier = ++rdev->wpan_dev_id; list_add_rcu(&wpan_dev->list, &rdev->wpan_dev_list); rdev->devlist_generation++; mutex_init(&wpan_dev->association_lock); INIT_LIST_HEAD(&wpan_dev->children); wpan_dev->max_associations = SZ_16K; wpan_dev->netdev = dev; break; case NETDEV_DOWN: cfg802154_update_iface_num(rdev, wpan_dev->iftype, -1); rdev->opencount--; wake_up(&rdev->dev_wait); break; case NETDEV_UP: cfg802154_update_iface_num(rdev, wpan_dev->iftype, 1); rdev->opencount++; break; case NETDEV_UNREGISTER: cfg802154_free_peer_structures(wpan_dev); /* It is possible to get NETDEV_UNREGISTER * multiple times. To detect that, check * that the interface is still on the list * of registered interfaces, and only then * remove and clean it up. */ if (!list_empty(&wpan_dev->list)) { list_del_rcu(&wpan_dev->list); rdev->devlist_generation++; } /* synchronize (so that we won't find this netdev * from other code any more) and then clear the list * head so that the above code can safely check for * !list_empty() to avoid double-cleanup. 
*/ synchronize_rcu(); INIT_LIST_HEAD(&wpan_dev->list); break; default: return NOTIFY_DONE; } return NOTIFY_OK; } static struct notifier_block cfg802154_netdev_notifier = { .notifier_call = cfg802154_netdev_notifier_call, }; static void __net_exit cfg802154_pernet_exit(struct net *net) { struct cfg802154_registered_device *rdev; rtnl_lock(); list_for_each_entry(rdev, &cfg802154_rdev_list, list) { if (net_eq(wpan_phy_net(&rdev->wpan_phy), net)) WARN_ON(cfg802154_switch_netns(rdev, &init_net)); } rtnl_unlock(); } static struct pernet_operations cfg802154_pernet_ops = { .exit = cfg802154_pernet_exit, }; static int __init wpan_phy_class_init(void) { int rc; rc = register_pernet_device(&cfg802154_pernet_ops); if (rc) goto err; rc = wpan_phy_sysfs_init(); if (rc) goto err_sysfs; rc = register_netdevice_notifier(&cfg802154_netdev_notifier); if (rc) goto err_nl; rc = ieee802154_nl_init(); if (rc) goto err_notifier; rc = nl802154_init(); if (rc) goto err_ieee802154_nl; return 0; err_ieee802154_nl: ieee802154_nl_exit(); err_notifier: unregister_netdevice_notifier(&cfg802154_netdev_notifier); err_nl: wpan_phy_sysfs_exit(); err_sysfs: unregister_pernet_device(&cfg802154_pernet_ops); err: return rc; } subsys_initcall(wpan_phy_class_init); static void __exit wpan_phy_class_exit(void) { nl802154_exit(); ieee802154_nl_exit(); unregister_netdevice_notifier(&cfg802154_netdev_notifier); wpan_phy_sysfs_exit(); unregister_pernet_device(&cfg802154_pernet_ops); } module_exit(wpan_phy_class_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("IEEE 802.15.4 configuration interface"); MODULE_AUTHOR("Dmitry Eremin-Solenikov");
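wpan_phy_for_each() above walks every registered PHY via the class device list. A minimal, hypothetical caller (not part of this file) looks like the sketch below; a non-zero return from the callback stops the walk, mirroring class_for_each_device().

/* Hypothetical usage sketch for wpan_phy_for_each() defined above. */
static int count_one_phy(struct wpan_phy *phy, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* returning non-zero would abort the iteration */
}

static int count_wpan_phys(void)
{
	int count = 0;

	wpan_phy_for_each(count_one_phy, &count);
	return count;
}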
// SPDX-License-Identifier: GPL-2.0+ /* * XArray implementation * Copyright (c) 2017-2018 Microsoft Corporation * Copyright (c) 2018-2020 Oracle * Author: Matthew Wilcox <willy@infradead.org> */ #include <linux/bitmap.h> #include <linux/export.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/xarray.h> #include "radix-tree.h" /* * Coding conventions in this file: * * @xa is used to refer to the entire xarray. * @xas is the 'xarray operation state'. It may be either a pointer to * an xa_state, or an xa_state stored on the stack. This is an unfortunate * ambiguity. * @index is the index of the entry being operated on * @mark is an xa_mark_t; a small number indicating one of the mark bits. * @node refers to an xa_node; usually the primary one being operated on by * this function. * @offset is the index into the slots array inside an xa_node. * @parent refers to the @xa_node closer to the head than @node.
 * @entry refers to something stored in a slot in the xarray
 */

static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}

static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_lock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_lock_bh(xas);
	else
		xas_lock(xas);
}

static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
{
	if (lock_type == XA_LOCK_IRQ)
		xas_unlock_irq(xas);
	else if (lock_type == XA_LOCK_BH)
		xas_unlock_bh(xas);
	else
		xas_unlock(xas);
}

static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}

static inline bool xa_zero_busy(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}

static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}

static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}

static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}

static inline bool node_get_mark(struct xa_node *node,
		unsigned int offset, xa_mark_t mark)
{
	return test_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}

/* returns true if the bit was set */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}

static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
{
	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
}

static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
{
	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
}

#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)

/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set.  Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];

		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}

/* extracts the offset within this node from the index */
static unsigned int get_offset(unsigned long index, struct xa_node *node)
{
	return (index >> node->shift) & XA_CHUNK_MASK;
}

static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}

/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;

	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}

static void xas_next_offset(struct xa_state *xas)
{
	xas->xa_offset++;
	xas_move_index(xas, xas->xa_offset);
}

static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}
/*
 * Starts a walk.  If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to.  If we're in an
 * error state, return NULL.  If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}

static __always_inline void *xas_descend(struct xa_state *xas,
					struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	while (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
		if (node->shift && xa_is_node(entry))
			entry = XA_RETRY_ENTRY;
	}

	xas->xa_offset = offset;
	return entry;
}

/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index.  However, it will do nothing and return %NULL if
 * @xas is in an error state.  xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
		if (node->shift == 0)
			break;
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
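/*
 * Editor's sketch (not part of the original file): a typical advanced-API
 * read, equivalent in spirit to xa_load() further below.  The function
 * name is hypothetical.
 */
static __maybe_unused void *example_xas_load(struct xarray *xa,
		unsigned long index)
{
	XA_STATE(xas, xa, index);	/* operation state on the stack */
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);	/* may return a retry entry */
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}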
#define XA_RCU_FREE	((struct xarray *)1)

static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * xas_destroy() - Free any resources allocated during the XArray operation.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function; it is called for you
 * by xas_nomem().
 */
void xas_destroy(struct xa_state *xas)
{
	struct xa_node *next, *node = xas->xa_alloc;

	while (node) {
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
		next = rcu_dereference_raw(node->parent);
		radix_tree_node_rcu_free(&node->rcu_head);
		xas->xa_alloc = node = next;
	}
}

/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue.  The caller
 * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc().  More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
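/*
 * Editor's sketch (not part of the original file): the retry pattern
 * described above.  The store is attempted under the lock; on -ENOMEM,
 * xas_nomem() allocates a node with the lock dropped and the loop
 * retries.  The function name is hypothetical.
 */
static __maybe_unused void *example_store_retry(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return curr;
}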
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem().
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;
	if (gfpflags_allow_blocking(gfp)) {
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}

static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}

static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		xas->xa_alloc = NULL;
	} else {
		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
			gfp |= __GFP_ACCOUNT;

		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}

#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
#endif

/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas.  Because we cannot store a
 * multi-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;

		max |= mask;
		if (mask == max)
			max++;
	}
#endif

	return max;
}

/* The maximum index that can be contained in the array without expanding it */
static unsigned long max_index(void *entry)
{
	if (!xa_is_node(entry))
		return 0;
	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}

static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		if (!xa_is_node(entry) && node->shift)
			break;
		if (xa_is_zero(entry) && xa_zero_busy(xa))
			entry = NULL;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}

/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if
 * @xas->xa_node has a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	if (!node->parent)
		xas_shrink(xas);
}

/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree.  We must now free it and all
 * of its subnodes.  There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (node->shift && xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}

/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}

/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}

/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas, true);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		for (;;) {
			struct xa_node *node = xas->xa_node;

			if (node->shift >= shift)
				break;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);

static void update_node(struct xa_state *xas, struct xa_node *node,
		int count, int values)
{
	if (!node || (!count && !values))
		return;

	node->count += count;
	node->nr_values += values;
	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
	xas_update(xas, node);
	if (count < 0)
		xas_delete_node(xas);
}

/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry) {
		bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);

		first = xas_create(xas, allow_root);
	} else {
		first = xas_load(xas);
	}

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next) && (!node || node->shift))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);

/**
 * xas_get_mark() - Returns the state of this mark.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Return: true if the mark is set, false if the mark is clear or @xas
 * is in an error state.
 */
bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
{
	if (xas_invalid(xas))
		return false;
	if (!xas->xa_node)
		return xa_marked(xas->xa, mark);
	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
}
EXPORT_SYMBOL_GPL(xas_get_mark);

/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries.  Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);

/**
 * xas_clear_mark() - Clears the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Clears the specified mark on this entry, and walks back to the head
 * attempting to clear it on all the ancestor entries.  Does nothing if
 * @xas has not been walked to an entry, or is in an error state.
 */
void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		if (!node_clear_mark(node, offset, mark))
			return;
		if (node_any_mark(node, mark))
			return;

		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	if (xa_marked(xas->xa, mark))
		xa_mark_clear(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_clear_mark);

/**
 * xas_init_marks() - Initialise all marks for the entry
 * @xas: Array operations state.
 *
 * Initialise all marks for the entry specified by @xas.  If we're tracking
 * free entries with a mark, we need to set it on all entries.  All other
 * marks are cleared.
 *
 * This implementation is not as efficient as it could be; we may walk
 * up the tree multiple times.
 */
void xas_init_marks(const struct xa_state *xas)
{
	xa_mark_t mark = 0;

	for (;;) {
		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
			xas_set_mark(xas, mark);
		else
			xas_clear_mark(xas, mark);
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}
EXPORT_SYMBOL_GPL(xas_init_marks);
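/*
 * Editor's sketch (not part of the original file): setting a mark with
 * the normal API and then visiting only the marked entries.  The
 * function name is hypothetical; xa_set_mark() and xa_for_each_marked()
 * are declared in <linux/xarray.h>.
 */
static __maybe_unused void example_marks(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_set_mark(xa, 5, XA_MARK_0);	/* no effect if index 5 is empty */
	xa_for_each_marked(xa, index, entry, XA_MARK_0)
		pr_info("index %lu is marked\n", index);
}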
#ifdef CONFIG_XARRAY_MULTI
static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
{
	unsigned int marks = 0;
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		if (node_get_mark(node, offset, mark))
			marks |= 1 << (__force unsigned int)mark;
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}

	return marks;
}

static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
		xa_mark_t mark)
{
	int i;

	if (sibs == 0)
		node_mark_all(node, mark);
	else {
		for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1)
			node_set_mark(node, i, mark);
	}
}

static void node_set_marks(struct xa_node *node, unsigned int offset,
			struct xa_node *child, unsigned int sibs,
			unsigned int marks)
{
	xa_mark_t mark = XA_MARK_0;

	for (;;) {
		if (marks & (1 << (__force unsigned int)mark)) {
			node_set_mark(node, offset, mark);
			if (child)
				node_mark_slots(child, sibs, mark);
		}
		if (mark == XA_MARK_MAX)
			break;
		mark_inc(mark);
	}
}

/**
 * xas_split_alloc() - Allocate memory for splitting an entry.
 * @xas: XArray operation state.
 * @entry: New entry which will be stored in the array.
 * @order: Current entry order.
 * @gfp: Memory allocation flags.
 *
 * This function should be called before calling xas_split().
 * If necessary, it will allocate new nodes (and fill them with @entry)
 * to prepare for the upcoming split of an entry of @order size into
 * entries of the order stored in the @xas.
 *
 * Context: May sleep if @gfp flags permit.
 */
void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
		gfp_t gfp)
{
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int mask = xas->xa_sibs;

	/* XXX: no support for splitting really large entries yet */
	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
		goto nomem;
	if (xas->xa_shift + XA_CHUNK_SHIFT > order)
		return;

	do {
		unsigned int i;
		void *sibling = NULL;
		struct xa_node *node;

		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
		if (!node)
			goto nomem;
		node->array = xas->xa;
		for (i = 0; i < XA_CHUNK_SIZE; i++) {
			if ((i & mask) == 0) {
				RCU_INIT_POINTER(node->slots[i], entry);
				sibling = xa_mk_sibling(i);
			} else {
				RCU_INIT_POINTER(node->slots[i], sibling);
			}
		}
		RCU_INIT_POINTER(node->parent, xas->xa_alloc);
		xas->xa_alloc = node;
	} while (sibs-- > 0);

	return;
nomem:
	xas_destroy(xas);
	xas_set_err(xas, -ENOMEM);
}
EXPORT_SYMBOL_GPL(xas_split_alloc);

/**
 * xas_split() - Split a multi-index entry into smaller entries.
 * @xas: XArray operation state.
 * @entry: New entry to store in the array.
 * @order: Current entry order.
 *
 * The size of the new entries is set in @xas.  The value in @entry is
 * copied to all the replacement entries.
 *
 * Context: Any context.  The caller should hold the xa_lock.
 */
void xas_split(struct xa_state *xas, void *entry, unsigned int order)
{
	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	unsigned int offset, marks;
	struct xa_node *node;
	void *curr = xas_load(xas);
	int values = 0;

	node = xas->xa_node;
	if (xas_top(node))
		return;

	marks = node_get_marks(node, xas->xa_offset);

	offset = xas->xa_offset + sibs;
	do {
		if (xas->xa_shift < node->shift) {
			struct xa_node *child = xas->xa_alloc;

			xas->xa_alloc = rcu_dereference_raw(child->parent);
			child->shift = node->shift - XA_CHUNK_SHIFT;
			child->offset = offset;
			child->count = XA_CHUNK_SIZE;
			child->nr_values = xa_is_value(entry) ?
					XA_CHUNK_SIZE : 0;
			RCU_INIT_POINTER(child->parent, node);
			node_set_marks(node, offset, child, xas->xa_sibs,
					marks);
			rcu_assign_pointer(node->slots[offset],
					xa_mk_node(child));
			if (xa_is_value(curr))
				values--;
			xas_update(xas, child);
		} else {
			unsigned int canon = offset - xas->xa_sibs;

			node_set_marks(node, canon, NULL, 0, marks);
			rcu_assign_pointer(node->slots[canon], entry);
			while (offset > canon)
				rcu_assign_pointer(node->slots[offset--],
						xa_mk_sibling(canon));
			values += (xa_is_value(entry) - xa_is_value(curr)) *
					(xas->xa_sibs + 1);
		}
	} while (offset-- > xas->xa_offset);

	node->nr_values += values;
	xas_update(xas, node);
}
EXPORT_SYMBOL_GPL(xas_split);
#endif

/**
 * xas_pause() - Pause a walk to drop a lock.
 * @xas: XArray operation state.
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @xas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call xas_pause(), the xa_for_each()
 * iterator may be more appropriate.
 *
 * Note that xas_pause() only works for forward iteration.  If a user needs
 * to pause a reverse iteration, we will need an xas_pause_rev().
 */
void xas_pause(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (xas_invalid(xas))
		return;

	xas->xa_node = XAS_RESTART;
	if (node) {
		unsigned long offset = xas->xa_offset;

		while (++offset < XA_CHUNK_SIZE) {
			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
				break;
		}
		xas->xa_index += (offset - xas->xa_offset) << node->shift;
		if (xas->xa_index == 0)
			xas->xa_node = XAS_BOUNDS;
	} else {
		xas->xa_index++;
	}
}
EXPORT_SYMBOL_GPL(xas_pause);
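/*
 * Editor's sketch (not part of the original file): dropping the lock in
 * the middle of an iteration, as the comment above suggests.  Assumes
 * <linux/sched.h> for need_resched()/cond_resched(); the function name
 * is hypothetical.
 */
static __maybe_unused void example_pause(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (need_resched()) {
			xas_pause(&xas);	/* make @xas safe to resume */
			xas_unlock(&xas);
			cond_resched();
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
}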
/*
 * __xas_prev() - Find the previous entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_prev() which handles all the complex cases
 * out of line.
 */
void *__xas_prev(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index--;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset--;

	while (xas->xa_offset == 255) {
		xas->xa_offset = xas->xa_node->offset - 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_prev);

/*
 * __xas_next() - Find the next entry in the XArray.
 * @xas: XArray operation state.
 *
 * Helper function for xas_next() which handles all the complex cases
 * out of line.
 */
void *__xas_next(struct xa_state *xas)
{
	void *entry;

	if (!xas_frozen(xas->xa_node))
		xas->xa_index++;
	if (!xas->xa_node)
		return set_bounds(xas);
	if (xas_not_node(xas->xa_node))
		return xas_load(xas);

	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
		xas->xa_offset++;

	while (xas->xa_offset == XA_CHUNK_SIZE) {
		xas->xa_offset = xas->xa_node->offset + 1;
		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
		if (!xas->xa_node)
			return set_bounds(xas);
	}

	for (;;) {
		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!xa_is_node(entry))
			return entry;

		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}
}
EXPORT_SYMBOL_GPL(__xas_next);

/**
 * xas_find() - Find the next present entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * If the @xas has not yet been walked to an entry, return the entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we move to the
 * next entry.
 *
 * If no entry is found and the array is smaller than @max, the iterator
 * is set to the smallest index not yet in the array.  This allows @xas
 * to be immediately passed to xas_store().
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find(struct xa_state *xas, unsigned long max)
{
	void *entry;

	if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
		return NULL;
	if (xas->xa_index > max)
		return set_bounds(xas);

	if (!xas->xa_node) {
		xas->xa_index = 1;
		return set_bounds(xas);
	} else if (xas->xa_node == XAS_RESTART) {
		entry = xas_load(xas);
		if (entry || xas_not_node(xas->xa_node))
			return entry;
	} else if (!xas->xa_node->shift &&
		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
	}

	xas_next_offset(xas);

	while (xas->xa_node && (xas->xa_index <= max)) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (xa_is_node(entry)) {
			xas->xa_node = xa_to_node(entry);
			xas->xa_offset = 0;
			continue;
		}
		if (entry && !xa_is_sibling(entry))
			return entry;

		xas_next_offset(xas);
	}

	if (!xas->xa_node)
		xas->xa_node = XAS_BOUNDS;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find);
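/*
 * Editor's sketch (not part of the original file): the convenience
 * iterator built on xas_find().  xa_for_each() is declared in
 * <linux/xarray.h>; the function name is hypothetical.
 */
static __maybe_unused void example_iterate(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry)
		pr_info("%lu: %p\n", index, entry);
}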
/**
 * xas_find_marked() - Find the next marked entry in the XArray.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark number to search for.
 *
 * If the @xas has not yet been walked to an entry, return the marked entry
 * which has an index >= xas.xa_index.  If it has been walked, the entry
 * currently being pointed at has been processed, and so we return the
 * first marked entry with an index > xas.xa_index.
 *
 * If no marked entry is found and the array is smaller than @max, @xas is
 * set to the bounds state and xas->xa_index is set to the smallest index
 * not yet in the array.  This allows @xas to be immediately passed to
 * xas_store().
 *
 * If no entry is found before @max is reached, @xas is set to the restart
 * state.
 *
 * Return: The entry, if found, otherwise %NULL.
 */
void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
{
	bool advance = true;
	unsigned int offset;
	void *entry;

	if (xas_error(xas))
		return NULL;
	if (xas->xa_index > max)
		goto max;

	if (!xas->xa_node) {
		xas->xa_index = 1;
		goto out;
	} else if (xas_top(xas->xa_node)) {
		advance = false;
		entry = xa_head(xas->xa);
		xas->xa_node = NULL;
		if (xas->xa_index > max_index(entry))
			goto out;
		if (!xa_is_node(entry)) {
			if (xa_marked(xas->xa, mark))
				return entry;
			xas->xa_index = 1;
			goto out;
		}
		xas->xa_node = xa_to_node(entry);
		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
	}

	while (xas->xa_index <= max) {
		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
			xas->xa_offset = xas->xa_node->offset + 1;
			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			advance = false;
			continue;
		}

		if (!advance) {
			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
			if (xa_is_sibling(entry)) {
				xas->xa_offset = xa_to_sibling(entry);
				xas_move_index(xas, xas->xa_offset);
			}
		}

		offset = xas_find_chunk(xas, advance, mark);
		if (offset > xas->xa_offset) {
			advance = false;
			xas_move_index(xas, offset);
			/* Mind the wrap */
			if ((xas->xa_index - 1) >= max)
				goto max;
			xas->xa_offset = offset;
			if (offset == XA_CHUNK_SIZE)
				continue;
		}

		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
			continue;
		if (!xa_is_node(entry))
			return entry;
		xas->xa_node = xa_to_node(entry);
		xas_set_offset(xas);
	}

out:
	if (xas->xa_index > max)
		goto max;
	return set_bounds(xas);
max:
	xas->xa_node = XAS_RESTART;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_marked);

/**
 * xas_find_conflict() - Find the next present entry in a range.
 * @xas: XArray operation state.
 *
 * The @xas describes both a range and a position within that range.
 *
 * Context: Any context.  Expects xa_lock to be held.
 * Return: The next entry in the range covered by @xas or %NULL.
 */
void *xas_find_conflict(struct xa_state *xas)
{
	void *curr;

	if (xas_error(xas))
		return NULL;

	if (!xas->xa_node)
		return NULL;

	if (xas_top(xas->xa_node)) {
		curr = xas_start(xas);
		if (!curr)
			return NULL;
		while (xa_is_node(curr)) {
			struct xa_node *node = xa_to_node(curr);

			curr = xas_descend(xas, node);
		}
		if (curr)
			return curr;
	}

	if (xas->xa_node->shift > xas->xa_shift)
		return NULL;

	for (;;) {
		if (xas->xa_node->shift == xas->xa_shift) {
			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
				break;
		} else if (xas->xa_offset == XA_CHUNK_MASK) {
			xas->xa_offset = xas->xa_node->offset;
			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
			if (!xas->xa_node)
				break;
			continue;
		}
		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
		if (xa_is_sibling(curr))
			continue;
		while (xa_is_node(curr)) {
			xas->xa_node = xa_to_node(curr);
			xas->xa_offset = 0;
			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
		}
		if (curr)
			return curr;
	}
	xas->xa_offset -= xas->xa_sibs;
	return NULL;
}
EXPORT_SYMBOL_GPL(xas_find_conflict);

/**
 * xa_load() - Load an entry from an XArray.
 * @xa: XArray.
 * @index: index into array.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry at @index in @xa.
 */
void *xa_load(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	return entry;
}
EXPORT_SYMBOL(xa_load);

static void *xas_result(struct xa_state *xas, void *curr)
{
	if (xa_is_zero(curr))
		return NULL;
	if (xas_error(xas))
		curr = xas->xa_node;
	return curr;
}

/**
 * __xa_erase() - Erase this entry from the XArray while locked.
 * @xa: XArray.
 * @index: Index into array.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 * Return: The entry which used to be at this index.
 */
void *__xa_erase(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);

	return xas_result(&xas, xas_store(&xas, NULL));
}
EXPORT_SYMBOL(__xa_erase);

/**
 * xa_erase() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * Return: The entry which used to be at this index.
 */
void *xa_erase(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock(xa);
	entry = __xa_erase(xa, index);
	xa_unlock(xa);

	return entry;
}
EXPORT_SYMBOL(xa_erase);

/**
 * __xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);
	if (xa_track_free(xa) && !entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_store(&xas, entry);
		if (xa_track_free(xa))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_store);

/**
 * xa_store() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from this index will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with @index are unaffected unless @entry is %NULL.
 *
 * Context: Any context.  Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
 * failed.
 */
void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	void *curr;

	xa_lock(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock(xa);

	return curr;
}
EXPORT_SYMBOL(xa_store);
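/*
 * Editor's sketch (not part of the original file): the normal store /
 * load / erase API with error checking.  The function name is
 * hypothetical.
 */
static __maybe_unused int example_simple_api(struct xarray *xa)
{
	void *old = xa_store(xa, 1, xa_mk_value(42), GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);	/* e.g. -ENOMEM */
	WARN_ON(xa_load(xa, 1) != xa_mk_value(42));
	xa_erase(xa, 1);
	return 0;
}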
/**
 * __xa_cmpxchg() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * You must already be holding the xa_lock when calling this function.
 * It will drop the lock if needed to allocate memory, and then reacquire
 * it afterwards.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return XA_ERROR(-EINVAL);

	do {
		curr = xas_load(&xas);
		if (curr == old) {
			xas_store(&xas, entry);
			if (xa_track_free(xa) && entry && !curr)
				xas_clear_mark(&xas, XA_FREE_MARK);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);

/**
 * __xa_insert() - Store this entry in the XArray if no entry is present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present.  Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);
	void *curr;

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		curr = xas_load(&xas);
		if (!curr) {
			xas_store(&xas, entry);
			if (xa_track_free(xa))
				xas_clear_mark(&xas, XA_FREE_MARK);
		} else {
			xas_set_err(&xas, -EBUSY);
		}
	} while (__xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_insert);
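/*
 * Editor's sketch (not part of the original file): the unlocked wrapper
 * xa_insert() (declared in <linux/xarray.h>) fails with -EBUSY rather
 * than overwriting an existing entry.  The function name is
 * hypothetical.
 */
static __maybe_unused int example_insert(struct xarray *xa, void *p)
{
	int err = xa_insert(xa, 7, p, GFP_KERNEL);

	if (err == -EBUSY)
		pr_info("index 7 was already occupied\n");
	return err;
}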
#ifdef CONFIG_XARRAY_MULTI
static void xas_set_range(struct xa_state *xas, unsigned long first,
		unsigned long last)
{
	unsigned int shift = 0;
	unsigned long sibs = last - first;
	unsigned int offset = XA_CHUNK_MASK;

	xas_set(xas, first);

	while ((first & XA_CHUNK_MASK) == 0) {
		if (sibs < XA_CHUNK_MASK)
			break;
		if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
			break;
		shift += XA_CHUNK_SHIFT;
		if (offset == XA_CHUNK_MASK)
			offset = sibs & XA_CHUNK_MASK;
		sibs >>= XA_CHUNK_SHIFT;
		first >>= XA_CHUNK_SHIFT;
	}

	offset = first & XA_CHUNK_MASK;
	if (offset + sibs > XA_CHUNK_MASK)
		sibs = XA_CHUNK_MASK - offset;
	if ((((first + sibs + 1) << shift) - 1) > last)
		sibs -= 1;

	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
}

/**
 * xa_store_range() - Store this entry at a range of indices in the XArray.
 * @xa: XArray.
 * @first: First index to affect.
 * @last: Last index to affect.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * After this function returns, loads from any index between @first and @last,
 * inclusive will return @entry.
 * Storing into an existing multi-index entry updates the entry of every index.
 * The marks associated with these indices are unaffected unless @entry is
 * %NULL.
 *
 * Context: Process context.  Takes and releases the xa_lock.  May sleep
 * if the @gfp flags permit.
 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
 */
void *xa_store_range(struct xarray *xa, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_internal(entry)))
		return XA_ERROR(-EINVAL);
	if (last < first)
		return XA_ERROR(-EINVAL);

	do {
		xas_lock(&xas);
		if (entry) {
			unsigned int order = BITS_PER_LONG;

			if (last + 1)
				order = __ffs(last + 1);
			xas_set_order(&xas, last, order);
			xas_create(&xas, true);
			if (xas_error(&xas))
				goto unlock;
		}
		do {
			xas_set_range(&xas, first, last);
			xas_store(&xas, entry);
			if (xas_error(&xas))
				goto unlock;
			first += xas_size(&xas);
		} while (first <= last);
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));

	return xas_result(&xas, NULL);
}
EXPORT_SYMBOL(xa_store_range);

/**
 * xas_get_order() - Get the order of an entry.
 * @xas: XArray operation state.
 *
 * Called after xas_load, the xas should not be in an error state.
 *
 * Return: A number between 0 and 63 indicating the order of the entry.
 */
int xas_get_order(struct xa_state *xas)
{
	int order = 0;

	if (!xas->xa_node)
		return 0;

	for (;;) {
		unsigned int slot = xas->xa_offset + (1 << order);

		if (slot >= XA_CHUNK_SIZE)
			break;
		if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
			break;
		order++;
	}

	order += xas->xa_node->shift;
	return order;
}
EXPORT_SYMBOL_GPL(xas_get_order);

/**
 * xa_get_order() - Get the order of an entry.
 * @xa: XArray.
 * @index: Index of the entry.
 *
 * Return: A number between 0 and 63 indicating the order of the entry.
 */
int xa_get_order(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	int order = 0;
	void *entry;

	rcu_read_lock();
	entry = xas_load(&xas);
	if (entry)
		order = xas_get_order(&xas);
	rcu_read_unlock();

	return order;
}
EXPORT_SYMBOL(xa_get_order);
#endif /* CONFIG_XARRAY_MULTI */

/**
 * __xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @limit: Range for allocated ID.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, gfp_t gfp)
{
	XA_STATE(xas, xa, 0);

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;
	if (WARN_ON_ONCE(!xa_track_free(xa)))
		return -EINVAL;

	if (!entry)
		entry = XA_ZERO_ENTRY;

	do {
		xas.xa_index = limit.min;
		xas_find_marked(&xas, limit.max, XA_FREE_MARK);
		if (xas.xa_node == XAS_RESTART)
			xas_set_err(&xas, -EBUSY);
		else
			*id = xas.xa_index;
		xas_store(&xas, entry);
		xas_clear_mark(&xas, XA_FREE_MARK);
	} while (__xas_nomem(&xas, gfp));

	return xas_error(&xas);
}
EXPORT_SYMBOL(__xa_alloc);
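/*
 * Editor's sketch (not part of the original file): ID allocation with
 * the unlocked wrapper xa_alloc() (declared in <linux/xarray.h>).  The
 * array must have been initialised with XA_FLAGS_ALLOC; the function
 * name is hypothetical.
 */
static __maybe_unused int example_alloc(struct xarray *xa, void *p)
{
	u32 id;
	int err = xa_alloc(xa, &id, p, xa_limit_32b, GFP_KERNEL);

	if (!err)
		pr_info("stored at id %u\n", id);
	return err;
}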
/**
 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index.  A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be operated on an xarray initialized with flag XA_FLAGS_ALLOC set
 * in xa_init_flags().
 *
 * Context: Any context.  Expects xa_lock to be held on entry.  May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping.  1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	u32 min = limit.min;
	int ret;

	limit.min = max(min, *next);
	ret = __xa_alloc(xa, id, entry, limit, gfp);
	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
		ret = 1;
	}

	if (ret < 0 && limit.min > min) {
		limit.min = min;
		ret = __xa_alloc(xa, id, entry, limit, gfp);
		if (ret == 0)
			ret = 1;
	}

	if (ret >= 0) {
		*next = *id + 1;
		if (*next == 0)
			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
	}
	return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);

/**
 * __xa_set_mark() - Set this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_set_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_set_mark);

/**
 * __xa_clear_mark() - Clear this mark on this entry while locked.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Context: Any context.  Expects xa_lock to be held on entry.
 */
void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry = xas_load(&xas);

	if (entry)
		xas_clear_mark(&xas, mark);
}
EXPORT_SYMBOL(__xa_clear_mark);

/**
 * xa_get_mark() - Inquire whether this mark is set on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * This function uses the RCU read lock, so the result may be out of date
 * by the time it returns.  If you need the result to be stable, use a lock.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: True if the entry at @index has this mark set, false if it doesn't.
 */
bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	XA_STATE(xas, xa, index);
	void *entry;

	rcu_read_lock();
	entry = xas_start(&xas);
	while (xas_get_mark(&xas, mark)) {
		if (!xa_is_node(entry))
			goto found;
		entry = xas_descend(&xas, xa_to_node(entry));
	}
	rcu_read_unlock();
	return false;
 found:
	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(xa_get_mark);

/**
 * xa_set_mark() - Set this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Attempting to set a mark on a %NULL entry does not succeed.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_set_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_set_mark);

/**
 * xa_clear_mark() - Clear this mark on this entry.
 * @xa: XArray.
 * @index: Index of entry.
 * @mark: Mark number.
 *
 * Clearing a mark always succeeds.
 *
 * Context: Process context.  Takes and releases the xa_lock.
 */
void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
{
	xa_lock(xa);
	__xa_clear_mark(xa, index, mark);
	xa_unlock(xa);
}
EXPORT_SYMBOL(xa_clear_mark);

/**
 * xa_find() - Search the XArray for an entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter, and has the lowest
 * index that is at least @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may not find
 * entries which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The entry, if found, otherwise %NULL.
 */
void *xa_find(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find);

static bool xas_sibling(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;
	unsigned long mask;

	if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
		return false;
	mask = (XA_CHUNK_SIZE << node->shift) - 1;
	return (xas->xa_index & mask) >
		((unsigned long)xas->xa_offset << node->shift);
}

/**
 * xa_find_after() - Search the XArray for a present entry.
 * @xa: XArray.
 * @indexp: Pointer to an index.
 * @max: Maximum index to search to.
 * @filter: Selection criterion.
 *
 * Finds the entry in @xa which matches the @filter and has the lowest
 * index that is above @indexp and no more than @max.
 * If an entry is found, @indexp is updated to be the index of the entry.
 * This function is protected by the RCU read lock, so it may miss entries
 * which are being simultaneously added.  It will not return an
 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The pointer, if found, otherwise %NULL.
 */
void *xa_find_after(struct xarray *xa, unsigned long *indexp,
			unsigned long max, xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp + 1);
	void *entry;

	if (xas.xa_index == 0)
		return NULL;

	rcu_read_lock();
	for (;;) {
		if ((__force unsigned int)filter < XA_MAX_MARKS)
			entry = xas_find_marked(&xas, max, filter);
		else
			entry = xas_find(&xas, max);

		if (xas_invalid(&xas))
			break;
		if (xas_sibling(&xas))
			continue;
		if (!xas_retry(&xas, entry))
			break;
	}
	rcu_read_unlock();

	if (entry)
		*indexp = xas.xa_index;
	return entry;
}
EXPORT_SYMBOL(xa_find_after);
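/*
 * Editor's sketch (not part of the original file): walking all present
 * entries with xa_find()/xa_find_after().  The function name is
 * hypothetical.
 */
static __maybe_unused void example_find_loop(struct xarray *xa)
{
	unsigned long index = 0;
	void *entry;

	entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);
	while (entry) {
		pr_info("%lu: %p\n", index, entry);
		entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);
	}
}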
static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each(xas, entry, max) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
			unsigned long max, unsigned int n, xa_mark_t mark)
{
	void *entry;
	unsigned int i = 0;

	rcu_read_lock();
	xas_for_each_marked(xas, entry, max, mark) {
		if (xas_retry(xas, entry))
			continue;
		dst[i++] = entry;
		if (i == n)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * xa_extract() - Copy selected entries from the XArray into a normal array.
 * @xa: The source XArray to copy from.
 * @dst: The buffer to copy entries into.
 * @start: The first index in the XArray eligible to be selected.
 * @max: The last index in the XArray eligible to be selected.
 * @n: The maximum number of entries to copy.
 * @filter: Selection criterion.
 *
 * Copies up to @n entries that match @filter from the XArray.  The
 * copied entries will have indices between @start and @max, inclusive.
 *
 * The @filter may be an XArray mark value, in which case entries which are
 * marked with that mark will be copied.  It may also be %XA_PRESENT, in
 * which case all entries which are not %NULL will be copied.
 *
 * The entries returned may not represent a snapshot of the XArray at a
 * moment in time.  For example, if another thread stores to index 5, then
 * index 10, calling xa_extract() may return the old contents of index 5
 * and the new contents of index 10.  Indices not modified while this
 * function is running will not be skipped.
 *
 * If you need stronger guarantees, holding the xa_lock across calls to this
 * function will prevent concurrent modification.
 *
 * Context: Any context.  Takes and releases the RCU lock.
 * Return: The number of entries copied.
 */
unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
			unsigned long max, unsigned int n, xa_mark_t filter)
{
	XA_STATE(xas, xa, start);

	if (!n)
		return 0;

	if ((__force unsigned int)filter < XA_MAX_MARKS)
		return xas_extract_marked(&xas, dst, max, n, filter);
	return xas_extract_present(&xas, dst, max, n);
}
EXPORT_SYMBOL(xa_extract);
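/*
 * Editor's sketch (not part of the original file): batched copy-out
 * with xa_extract().  The function name and batch size are
 * hypothetical.
 */
static __maybe_unused unsigned int example_extract(struct xarray *xa)
{
	void *batch[16];

	/* copy up to 16 present entries with indices 0..99 */
	return xa_extract(xa, batch, 0, 99, 16, XA_PRESENT);
}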
"offset" : "max", node->offset, node->parent, node->shift, node->count, node->nr_values, node->array, node->private_list.prev, node->private_list.next); for (i = 0; i < XA_MAX_MARKS; i++) for (j = 0; j < XA_MARK_LONGS; j++) pr_cont(" %lx", node->marks[i][j]); pr_cont("\n"); } void xa_dump_index(unsigned long index, unsigned int shift) { if (!shift) pr_info("%lu: ", index); else if (shift >= BITS_PER_LONG) pr_info("0-%lu: ", ~0UL); else pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1)); } void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift) { if (!entry) return; xa_dump_index(index, shift); if (xa_is_node(entry)) { if (shift == 0) { pr_cont("%px\n", entry); } else { unsigned long i; struct xa_node *node = xa_to_node(entry); xa_dump_node(node); for (i = 0; i < XA_CHUNK_SIZE; i++) xa_dump_entry(node->slots[i], index + (i << node->shift), node->shift); } } else if (xa_is_value(entry)) pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry), xa_to_value(entry), entry); else if (!xa_is_internal(entry)) pr_cont("%px\n", entry); else if (xa_is_retry(entry)) pr_cont("retry (%ld)\n", xa_to_internal(entry)); else if (xa_is_sibling(entry)) pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry)); else if (xa_is_zero(entry)) pr_cont("zero (%ld)\n", xa_to_internal(entry)); else pr_cont("UNKNOWN ENTRY (%px)\n", entry); } void xa_dump(const struct xarray *xa) { void *entry = xa->xa_head; unsigned int shift = 0; pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry, xa->xa_flags, xa_marked(xa, XA_MARK_0), xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2)); if (xa_is_node(entry)) shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT; xa_dump_entry(entry, 0, shift); } #endif
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for USB Mass Storage compliant devices * SCSI layer glue code * * Current development and maintenance by: * (c) 1999-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net) * * Developed with the assistance of: * (c) 2000 David L. Brown, Jr. (usb-storage@davidb.org) * (c) 2000 Stephen J. Gowdy (SGowdy@lbl.gov) * * Initial work by: * (c) 1999 Michael Gee (michael@linuxspecific.com) * * This driver is based on the 'USB Mass Storage Class' document. This * describes in detail the protocol used to communicate with such * devices. Clearly, the designers had SCSI and ATAPI commands in * mind when they created this document. The commands are all very * similar to commands in the SCSI-II and ATAPI specifications.
* * It is important to note that in a number of cases this class * exhibits class-specific exemptions from the USB specification. * Notably the usage of NAK, STALL and ACK differs from the norm, in * that they are used to communicate wait, failed and OK on commands. * * Also, for certain devices, the interrupt endpoint is used to convey * status of a command. */ #include <linux/blkdev.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/mutex.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_devinfo.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include "usb.h" #include "scsiglue.h" #include "debug.h" #include "transport.h" #include "protocol.h" /* * Vendor IDs for companies that seem to include the READ CAPACITY bug * in all their devices */ #define VENDOR_ID_NOKIA 0x0421 #define VENDOR_ID_NIKON 0x04b0 #define VENDOR_ID_PENTAX 0x0a17 #define VENDOR_ID_MOTOROLA 0x22b8 /*********************************************************************** * Host functions ***********************************************************************/ static const char* host_info(struct Scsi_Host *host) { struct us_data *us = host_to_us(host); return us->scsi_name; } static int slave_alloc (struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); /* * Set the INQUIRY transfer length to 36. We don't use any of * the extra data and many devices choke if asked for more or * less than 36 bytes. */ sdev->inquiry_len = 36; /* Tell the SCSI layer if we know there is more than one LUN */ if (us->protocol == USB_PR_BULK && us->max_lun > 0) sdev->sdev_bflags |= BLIST_FORCELUN; /* * Some USB storage devices reset if the IO advice hints grouping mode * page is queried. Hence skip that mode page. */ sdev->sdev_bflags |= BLIST_SKIP_IO_HINTS; return 0; } static int device_configure(struct scsi_device *sdev, struct queue_limits *lim) { struct us_data *us = host_to_us(sdev->host); struct device *dev = us->pusb_dev->bus->sysdev; /* * Many devices have trouble transferring more than 32KB at a time, * while others have trouble with more than 64K. At this time we * are limiting both to 32K (64 sectors). */ if (us->fflags & (US_FL_MAX_SECTORS_64 | US_FL_MAX_SECTORS_MIN)) { unsigned int max_sectors = 64; if (us->fflags & US_FL_MAX_SECTORS_MIN) max_sectors = PAGE_SIZE >> 9; lim->max_hw_sectors = min(lim->max_hw_sectors, max_sectors); } else if (sdev->type == TYPE_TAPE) { /* * Tapes need much higher max_sector limits, so just * raise it to the maximum possible (4 GB / 512) and * let the queue segment size sort out the real limit. */ lim->max_hw_sectors = 0x7FFFFF; } else if (us->pusb_dev->speed >= USB_SPEED_SUPER) { /* * USB3 devices will be limited to 2048 sectors. This gives us * better throughput on most devices. */ lim->max_hw_sectors = 2048; } /* * The max_hw_sectors should be up to the maximum size of a mapping for * the device. Otherwise, the DMA API might fail in a swiotlb environment. */ lim->max_hw_sectors = min_t(size_t, lim->max_hw_sectors, dma_max_mapping_size(dev) >> SECTOR_SHIFT); /* * We can't put these settings in slave_alloc() because that gets * called before the device type is known. Consequently these * settings can't be overridden via the scsi devinfo mechanism. */ if (sdev->type == TYPE_DISK) { /* * Some vendors seem to put the READ CAPACITY bug into * all their devices -- primarily makers of cell phones * and digital cameras.
Since these devices always use * flash media and can be expected to have an even number * of sectors, we will always enable the CAPACITY_HEURISTICS * flag unless told otherwise. */ switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) { case VENDOR_ID_NOKIA: case VENDOR_ID_NIKON: case VENDOR_ID_PENTAX: case VENDOR_ID_MOTOROLA: if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK))) us->fflags |= US_FL_CAPACITY_HEURISTICS; break; } /* * Disk-type devices use MODE SENSE(6) if the protocol * (SubClass) is Transparent SCSI, otherwise they use * MODE SENSE(10). */ if (us->subclass != USB_SC_SCSI && us->subclass != USB_SC_CYP_ATACB) sdev->use_10_for_ms = 1; /* * Many disks only accept MODE SENSE transfer lengths of * 192 bytes (that's what Windows uses). */ sdev->use_192_bytes_for_3f = 1; /* * Some devices report generic values until the media has been * accessed. Force a READ(10) prior to querying device * characteristics. */ sdev->read_before_ms = 1; /* * Some devices don't like MODE SENSE with page=0x3f, * which is the command used for checking if a device * is write-protected. Now that we tell the sd driver * to do a 192-byte transfer with this command the * majority of devices work fine, but a few still can't * handle it. The sd driver will simply assume those * devices are write-enabled. */ if (us->fflags & US_FL_NO_WP_DETECT) sdev->skip_ms_page_3f = 1; /* * A number of devices have problems with MODE SENSE for * page x08, so we will skip it. */ sdev->skip_ms_page_8 = 1; /* * Some devices don't handle VPD pages correctly, so skip vpd * pages if not forced by SCSI layer. */ sdev->skip_vpd_pages = !sdev->try_vpd_pages; /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */ sdev->no_report_opcodes = 1; /* Do not attempt to use WRITE SAME */ sdev->no_write_same = 1; /* * Some disks return the total number of blocks in response * to READ CAPACITY rather than the highest block number. * If this device makes that mistake, tell the sd driver. */ if (us->fflags & US_FL_FIX_CAPACITY) sdev->fix_capacity = 1; /* * A few disks have two indistinguishable versions, one of * which reports the correct capacity and the other does not. * The sd driver has to guess which is the case. */ if (us->fflags & US_FL_CAPACITY_HEURISTICS) sdev->guess_capacity = 1; /* Some devices cannot handle READ_CAPACITY_16 */ if (us->fflags & US_FL_NO_READ_CAPACITY_16) sdev->no_read_capacity_16 = 1; /* * Many devices do not respond properly to READ_CAPACITY_16. * Tell the SCSI layer to try READ_CAPACITY_10 first. * However some USB 3.0 drive enclosures return capacity * modulo 2TB. Those must use READ_CAPACITY_16. */ if (!(us->fflags & US_FL_NEEDS_CAP16)) sdev->try_rc_10_first = 1; /* * Assume SPC3 or later devices support sense size > 18 * unless the US_FL_BAD_SENSE quirk is specified. */ if (sdev->scsi_level > SCSI_SPC_2 && !(us->fflags & US_FL_BAD_SENSE)) us->fflags |= US_FL_SANE_SENSE; /* * USB-IDE bridges tend to report SK = 0x04 (Non-recoverable * Hardware Error) when any low-level error occurs, * recoverable or not. Setting this flag tells the SCSI * midlayer to retry such commands, which frequently will * succeed and fix the error. The worst this can lead to * is an occasional series of retries that will all fail. */ sdev->retry_hwerror = 1; /* * USB disks should allow restart. Some drives spin down * automatically, requiring a START-STOP UNIT command.
*/ sdev->allow_restart = 1; /* * Some USB card readers have trouble reading an SD card's last * sector in a read larger than one sector. Since the performance * impact is negligible, we set this flag for all USB disks. */ sdev->last_sector_bug = 1; /* * Enable last-sector hacks for single-target devices using * the Bulk-only transport, unless we already know the * capacity will be decremented or is correct. */ if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK | US_FL_SCM_MULT_TARG)) && us->protocol == USB_PR_BULK) us->use_last_sector_hacks = 1; /* Check if write cache default on flag is set or not */ if (us->fflags & US_FL_WRITE_CACHE) sdev->wce_default_on = 1; /* A few buggy USB-ATA bridges don't understand FUA */ if (us->fflags & US_FL_BROKEN_FUA) sdev->broken_fua = 1; /* Some even totally fail to indicate a cache */ if (us->fflags & US_FL_ALWAYS_SYNC) { /* don't read caching information */ sdev->skip_ms_page_8 = 1; sdev->skip_ms_page_3f = 1; /* assume sync is needed */ sdev->wce_default_on = 1; } } else { /* * Non-disk-type devices don't need to ignore any pages * or to force 192-byte transfer lengths for MODE SENSE. * But they do need to use MODE SENSE(10). */ sdev->use_10_for_ms = 1; /* Some (fake) usb cdrom devices don't like READ_DISC_INFO */ if (us->fflags & US_FL_NO_READ_DISC_INFO) sdev->no_read_disc_info = 1; } /* * The CB and CBI transports have no way to pass LUN values * other than the bits in the second byte of a CDB. But those * bits don't get set to the LUN value if the device reports * scsi_level == 0 (UNKNOWN). Hence such devices must necessarily * be single-LUN. */ if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_CBI) && sdev->scsi_level == SCSI_UNKNOWN) us->max_lun = 0; /* * Some devices choke when they receive a PREVENT-ALLOW MEDIUM * REMOVAL command, so suppress those commands. */ if (us->fflags & US_FL_NOT_LOCKABLE) sdev->lockable = 0; /* * This is to satisfy the compiler, though I don't think the * return code is ever checked anywhere. */ return 0; } static int target_alloc(struct scsi_target *starget) { struct us_data *us = host_to_us(dev_to_shost(starget->dev.parent)); /* * Some USB drives don't support REPORT LUNS, even though they * report a SCSI revision level above 2. Tell the SCSI layer * not to issue that command; it will perform a normal sequential * scan instead. */ starget->no_report_luns = 1; /* * The UFI spec treats the Peripheral Qualifier bits in an * INQUIRY result as reserved and requires devices to set them * to 0. However the SCSI spec requires these bits to be set * to 3 to indicate when a LUN is not present. * * Let the scanning code know if this target merely sets * Peripheral Device Type to 0x1f to indicate no LUN.
*/ if (us->subclass == USB_SC_UFI) starget->pdt_1f_for_no_lun = 1; return 0; } /* queue a command */ /* This is always called with scsi_lock(host) held */ static int queuecommand_lck(struct scsi_cmnd *srb) { void (*done)(struct scsi_cmnd *) = scsi_done; struct us_data *us = host_to_us(srb->device->host); /* check for state-transition errors */ if (us->srb != NULL) { dev_err(&us->pusb_intf->dev, "Error in %s: us->srb = %p\n", __func__, us->srb); return SCSI_MLQUEUE_HOST_BUSY; } /* fail the command if we are disconnecting */ if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) { usb_stor_dbg(us, "Fail command during disconnect\n"); srb->result = DID_NO_CONNECT << 16; done(srb); return 0; } if ((us->fflags & US_FL_NO_ATA_1X) && (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) { memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB, sizeof(usb_stor_sense_invalidCDB)); srb->result = SAM_STAT_CHECK_CONDITION; done(srb); return 0; } /* enqueue the command and wake up the control thread */ us->srb = srb; complete(&us->cmnd_ready); return 0; } static DEF_SCSI_QCMD(queuecommand) /*********************************************************************** * Error handling functions ***********************************************************************/ /* Command timeout and abort */ static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match) { /* * us->srb together with the TIMED_OUT, RESETTING, and ABORTING * bits are protected by the host lock. */ scsi_lock(us_to_host(us)); /* is there any active pending command to abort ? */ if (!us->srb) { scsi_unlock(us_to_host(us)); usb_stor_dbg(us, "-- nothing to abort\n"); return SUCCESS; } /* Does the command match the passed srb if any ? */ if (srb_match && us->srb != srb_match) { scsi_unlock(us_to_host(us)); usb_stor_dbg(us, "-- pending command mismatch\n"); return FAILED; } /* * Set the TIMED_OUT bit. Also set the ABORTING bit, but only if * a device reset isn't already in progress (to avoid interfering * with the reset). Note that we must retain the host lock while * calling usb_stor_stop_transport(); otherwise it might interfere * with an auto-reset that begins as soon as we release the lock. */ set_bit(US_FLIDX_TIMED_OUT, &us->dflags); if (!test_bit(US_FLIDX_RESETTING, &us->dflags)) { set_bit(US_FLIDX_ABORTING, &us->dflags); usb_stor_stop_transport(us); } scsi_unlock(us_to_host(us)); /* Wait for the aborted command to finish */ wait_for_completion(&us->notify); return SUCCESS; } static int command_abort(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); usb_stor_dbg(us, "%s called\n", __func__); return command_abort_matching(us, srb); } /* * This invokes the transport reset mechanism to reset the state of the * device */ static int device_reset(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); int result; usb_stor_dbg(us, "%s called\n", __func__); /* abort any pending command before reset */ command_abort_matching(us, NULL); /* lock the device pointers and do the reset */ mutex_lock(&(us->dev_mutex)); result = us->transport_reset(us); mutex_unlock(&us->dev_mutex); return result < 0 ? FAILED : SUCCESS; } /* Simulate a SCSI bus reset by resetting the device's USB port. */ static int bus_reset(struct scsi_cmnd *srb) { struct us_data *us = host_to_us(srb->device->host); int result; usb_stor_dbg(us, "%s called\n", __func__); result = usb_stor_port_reset(us); return result < 0 ? FAILED : SUCCESS; } /* * Report a driver-initiated device reset to the SCSI layer. 
* Calling this for a SCSI-initiated reset is unnecessary but harmless. * The caller must own the SCSI host lock. */ void usb_stor_report_device_reset(struct us_data *us) { int i; struct Scsi_Host *host = us_to_host(us); scsi_report_device_reset(host, 0, 0); if (us->fflags & US_FL_SCM_MULT_TARG) { for (i = 1; i < host->max_id; ++i) scsi_report_device_reset(host, 0, i); } } /* * Report a driver-initiated bus reset to the SCSI layer. * Calling this for a SCSI-initiated reset is unnecessary but harmless. * The caller must not own the SCSI host lock. */ void usb_stor_report_bus_reset(struct us_data *us) { struct Scsi_Host *host = us_to_host(us); scsi_lock(host); scsi_report_bus_reset(host, 0); scsi_unlock(host); } /*********************************************************************** * /proc/scsi/ functions ***********************************************************************/ static int write_info(struct Scsi_Host *host, char *buffer, int length) { /* if someone is sending us data, just throw it away */ return length; } static int show_info (struct seq_file *m, struct Scsi_Host *host) { struct us_data *us = host_to_us(host); const char *string; /* print the controller name */ seq_printf(m, " Host scsi%d: usb-storage\n", host->host_no); /* print product, vendor, and serial number strings */ if (us->pusb_dev->manufacturer) string = us->pusb_dev->manufacturer; else if (us->unusual_dev->vendorName) string = us->unusual_dev->vendorName; else string = "Unknown"; seq_printf(m, " Vendor: %s\n", string); if (us->pusb_dev->product) string = us->pusb_dev->product; else if (us->unusual_dev->productName) string = us->unusual_dev->productName; else string = "Unknown"; seq_printf(m, " Product: %s\n", string); if (us->pusb_dev->serial) string = us->pusb_dev->serial; else string = "None"; seq_printf(m, "Serial Number: %s\n", string); /* show the protocol and transport */ seq_printf(m, " Protocol: %s\n", us->protocol_name); seq_printf(m, " Transport: %s\n", us->transport_name); /* show the device flags */ seq_printf(m, " Quirks:"); #define US_FLAG(name, value) \ if (us->fflags & value) seq_printf(m, " " #name); US_DO_ALL_FLAGS #undef US_FLAG seq_putc(m, '\n'); return 0; } /*********************************************************************** * Sysfs interface ***********************************************************************/ /* Output routine for the sysfs max_sectors file */ static ssize_t max_sectors_show(struct device *dev, struct device_attribute *attr, char *buf) { struct scsi_device *sdev = to_scsi_device(dev); return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue)); } /* Input routine for the sysfs max_sectors file */ static ssize_t max_sectors_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct scsi_device *sdev = to_scsi_device(dev); struct queue_limits lim; unsigned short ms; int ret; if (sscanf(buf, "%hu", &ms) <= 0) return -EINVAL; lim = queue_limits_start_update(sdev->request_queue); lim.max_hw_sectors = ms; ret = queue_limits_commit_update_frozen(sdev->request_queue, &lim); if (ret) return ret; return count; } static DEVICE_ATTR_RW(max_sectors); static struct attribute *usb_sdev_attrs[] = { &dev_attr_max_sectors.attr, NULL, }; ATTRIBUTE_GROUPS(usb_sdev); /* * this defines our host template, with which we'll allocate hosts */ static const struct scsi_host_template usb_stor_host_template = { /* basic userland interface stuff */ .name = "usb-storage", .proc_name = "usb-storage", .show_info = show_info, .write_info = write_info, 
.info = host_info, /* command interface -- queued only */ .queuecommand = queuecommand, /* error and abort handlers */ .eh_abort_handler = command_abort, .eh_device_reset_handler = device_reset, .eh_bus_reset_handler = bus_reset, /* queue commands only, only one command per LUN */ .can_queue = 1, /* unknown initiator id */ .this_id = -1, .slave_alloc = slave_alloc, .device_configure = device_configure, .target_alloc = target_alloc, /* lots of sg segments can be handled */ .sg_tablesize = SG_MAX_SEGMENTS, /* * Some host controllers may have alignment requirements. * We'll play it safe by requiring 512-byte alignment always. */ .dma_alignment = 511, /* * Limit the total size of a transfer to 120 KB. * * Some devices are known to choke with anything larger. It seems like * the problem stems from the fact that original IDE controllers had * only an 8-bit register to hold the number of sectors in one transfer * and even those couldn't handle a full 256 sectors. * * Because we want to make sure we interoperate with as many devices as * possible, we will maintain a 240 sector transfer size limit for USB * Mass Storage devices. * * Tests show that other operating systems have similar limits, with * Microsoft Windows 7 limiting transfers to 128 sectors for both USB2 * and USB3, and Apple Mac OS X 10.11 limiting transfers to 256 sectors * for USB2 and 2048 for USB3 devices. */ .max_sectors = 240, /* emulated HBA */ .emulated = 1, /* we do our own delay after a device or bus reset */ .skip_settle_delay = 1, /* sysfs device attributes */ .sdev_groups = usb_sdev_groups, /* module management */ .module = THIS_MODULE }; void usb_stor_host_template_init(struct scsi_host_template *sht, const char *name, struct module *owner) { *sht = usb_stor_host_template; sht->name = name; sht->proc_name = name; sht->module = owner; } EXPORT_SYMBOL_GPL(usb_stor_host_template_init); /* To report "Illegal Request: Invalid Field in CDB" */ unsigned char usb_stor_sense_invalidCDB[18] = { [0] = 0x70, /* current error */ [2] = ILLEGAL_REQUEST, /* Illegal Request = 0x05 */ [7] = 0x0a, /* additional length */ [12] = 0x24 /* Invalid Field in CDB */ }; EXPORT_SYMBOL_GPL(usb_stor_sense_invalidCDB);
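To show how a usb-storage subdriver is expected to pick up the shared template through the usb_stor_host_template_init() helper exported above, here is a hedged sketch. The driver name "ums-example" and the function example_driver_setup() are placeholders invented for the illustration; real subdrivers (such as the Rio Karma driver that follows) usually get this wiring from the module_usb_stor_driver() macro instead of open-coding it.

#include <linux/module.h>
#include "usb.h"
#include "scsiglue.h"

static struct scsi_host_template example_host_template;

static int __init example_driver_setup(void)
{
	/* Copy the common usb-storage template, then brand it for this driver. */
	usb_stor_host_template_init(&example_host_template,
				    "ums-example", THIS_MODULE);
	return 0;
}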
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for Rio Karma * * (c) 2006 Bob Copeland <me@bobcopeland.com> * (c) 2006 Keith Bennett <keith@mcs.st-and.ac.uk> */ #include <linux/module.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "usb.h" #include "transport.h" #include "debug.h" #include "scsiglue.h" #define DRV_NAME "ums-karma" MODULE_DESCRIPTION("Driver for Rio Karma"); MODULE_AUTHOR("Bob Copeland <me@bobcopeland.com>, Keith Bennett <keith@mcs.st-and.ac.uk>"); MODULE_LICENSE("GPL"); MODULE_IMPORT_NS("USB_STORAGE"); #define RIO_PREFIX "RIOP\x00" #define RIO_PREFIX_LEN 5 #define RIO_SEND_LEN 40 #define RIO_RECV_LEN 0x200 #define RIO_ENTER_STORAGE 0x1 #define RIO_LEAVE_STORAGE 0x2 #define RIO_RESET 0xC struct karma_data { int in_storage; char *recv; }; static int rio_karma_init(struct us_data *us); /* * The table of devices */ #define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \ vendorName, productName, useProtocol, useTransport, \ initFunction, flags) \ { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \ .driver_info = (flags) } static const struct usb_device_id karma_usb_ids[] = { # include "unusual_karma.h" { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, karma_usb_ids); #undef UNUSUAL_DEV /* * The flags table */ #define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \ vendor_name, product_name, use_protocol, use_transport, \ init_function, Flags) \ { \ .vendorName = vendor_name, \ .productName = product_name, \ .useProtocol = use_protocol, \ .useTransport = use_transport, \ .initFunction = init_function, \ } static const struct us_unusual_dev karma_unusual_dev_list[] = { # include "unusual_karma.h" { } /* Terminating entry */ }; #undef UNUSUAL_DEV /* * Send commands to Rio Karma. * * For each command we send 40 bytes starting 'RIOP\0' followed by * the command number and a sequence number, which the device will ack * with a 512-byte packet with the high four bits set and everything * else null. Then we send 'RIOP\x80' followed by a zero and the * sequence number, until byte 5 in the response repeats the sequence * number.
*/ static int rio_karma_send_command(char cmd, struct us_data *us) { int result; unsigned long timeout; static unsigned char seq = 1; struct karma_data *data = (struct karma_data *) us->extra; usb_stor_dbg(us, "sending command %04x\n", cmd); memset(us->iobuf, 0, RIO_SEND_LEN); memcpy(us->iobuf, RIO_PREFIX, RIO_PREFIX_LEN); us->iobuf[5] = cmd; us->iobuf[6] = seq; timeout = jiffies + msecs_to_jiffies(6000); for (;;) { result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe, us->iobuf, RIO_SEND_LEN, NULL); if (result != USB_STOR_XFER_GOOD) goto err; result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe, data->recv, RIO_RECV_LEN, NULL); if (result != USB_STOR_XFER_GOOD) goto err; if (data->recv[5] == seq) break; if (time_after(jiffies, timeout)) goto err; us->iobuf[4] = 0x80; us->iobuf[5] = 0; msleep(50); } seq++; if (seq == 0) seq = 1; usb_stor_dbg(us, "sent command %04x\n", cmd); return 0; err: usb_stor_dbg(us, "command %04x failed\n", cmd); return USB_STOR_TRANSPORT_FAILED; } /* * Trap START_STOP and READ_10 to leave/re-enter storage mode. * Everything else is propagated to the normal bulk layer. */ static int rio_karma_transport(struct scsi_cmnd *srb, struct us_data *us) { int ret; struct karma_data *data = (struct karma_data *) us->extra; if (srb->cmnd[0] == READ_10 && !data->in_storage) { ret = rio_karma_send_command(RIO_ENTER_STORAGE, us); if (ret) return ret; data->in_storage = 1; return usb_stor_Bulk_transport(srb, us); } else if (srb->cmnd[0] == START_STOP) { ret = rio_karma_send_command(RIO_LEAVE_STORAGE, us); if (ret) return ret; data->in_storage = 0; return rio_karma_send_command(RIO_RESET, us); } return usb_stor_Bulk_transport(srb, us); } static void rio_karma_destructor(void *extra) { struct karma_data *data = (struct karma_data *) extra; kfree(data->recv); } static int rio_karma_init(struct us_data *us) { struct karma_data *data = kzalloc(sizeof(struct karma_data), GFP_NOIO); if (!data) return -ENOMEM; data->recv = kmalloc(RIO_RECV_LEN, GFP_NOIO); if (!data->recv) { kfree(data); return -ENOMEM; } us->extra = data; us->extra_destructor = rio_karma_destructor; if (rio_karma_send_command(RIO_ENTER_STORAGE, us)) return -EIO; data->in_storage = 1; return 0; } static struct scsi_host_template karma_host_template; static int karma_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct us_data *us; int result; result = usb_stor_probe1(&us, intf, id, (id - karma_usb_ids) + karma_unusual_dev_list, &karma_host_template); if (result) return result; us->transport_name = "Rio Karma/Bulk"; us->transport = rio_karma_transport; us->transport_reset = usb_stor_Bulk_reset; result = usb_stor_probe2(us); return result; } static struct usb_driver karma_driver = { .name = DRV_NAME, .probe = karma_probe, .disconnect = usb_stor_disconnect, .suspend = usb_stor_suspend, .resume = usb_stor_resume, .reset_resume = usb_stor_reset_resume, .pre_reset = usb_stor_pre_reset, .post_reset = usb_stor_post_reset, .id_table = karma_usb_ids, .soft_unbind = 1, .no_dynamic_id = 1, }; module_usb_stor_driver(karma_driver, karma_host_template, DRV_NAME);
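The two UNUSUAL_DEV() definitions above are both expanded over unusual_karma.h, which is not reproduced in this listing. Purely as a hedged illustration of the shape of an entry in that file (the vendor/product IDs, bcdDevice range, strings, and flags below are placeholders, not the real Karma values), an entry looks like this:

/*
 * Placeholder entry only: VID, PID, bcdDevice min/max, vendor and product
 * strings, SubClass, Transport, init function, flags.
 */
UNUSUAL_DEV(0x1234, 0x5678, 0x0100, 0x0199,
	    "Example Vendor", "Example Player",
	    USB_SC_SCSI, USB_PR_BULK,
	    rio_karma_init, 0),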
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Dynamic loading of modules into the kernel. * * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996 * Rewritten again by Rusty Russell, 2002 */ #ifndef _LINUX_MODULE_H #define _LINUX_MODULE_H #include <linux/list.h> #include <linux/stat.h> #include <linux/buildid.h> #include <linux/compiler.h> #include <linux/cache.h> #include <linux/kmod.h> #include <linux/init.h> #include <linux/elf.h> #include <linux/stringify.h> #include <linux/kobject.h> #include <linux/moduleparam.h> #include <linux/jump_label.h> #include <linux/export.h> #include <linux/rbtree_latch.h> #include <linux/error-injection.h> #include <linux/tracepoint-defs.h> #include <linux/srcu.h> #include <linux/static_call_types.h> #include <linux/dynamic_debug.h> #include <linux/percpu.h> #include <asm/module.h> #define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN struct modversion_info { unsigned long crc; char name[MODULE_NAME_LEN]; }; struct module; struct exception_table_entry; struct module_kobject { struct kobject kobj; struct module *mod; struct kobject *drivers_dir; struct module_param_attrs *mp; struct completion *kobj_completion; } __randomize_layout; struct module_attribute { struct attribute attr; ssize_t (*show)(struct module_attribute *, struct module_kobject *, char *); ssize_t (*store)(struct module_attribute *, struct module_kobject *, const char *, size_t count); void (*setup)(struct module *, const char *); int (*test)(struct module *); void (*free)(struct module *); }; struct module_version_attribute { struct module_attribute mattr; const char *module_name; const char *version; }; extern ssize_t __modver_version_show(struct module_attribute *, struct module_kobject *, char *); extern struct module_attribute module_uevent; /* These are either module local, or the kernel's dummy ones. */ extern int init_module(void); extern void cleanup_module(void); #ifndef MODULE /** * module_init() - driver initialization entry point * @x: function to be run at kernel boot time or module insertion * * module_init() will either be called during do_initcalls() (if * builtin) or at module insertion time (if a module). There can only * be one per module. */ #define module_init(x) __initcall(x); /** * module_exit() - driver exit entry point * @x: function to be run when driver is removed * * module_exit() will wrap the driver clean-up code * with cleanup_module() when used with rmmod when * the driver is a module. If the driver is statically * compiled into the kernel, module_exit() has no effect. * There can only be one per module. */ #define module_exit(x) __exitcall(x); #else /* MODULE */ /* * In most cases loadable modules do not need custom * initcall levels. There are still some valid cases where * a driver may be needed early if built in, and does not * matter when built as a loadable module. Like bus * snooping debug drivers.
*/ #define early_initcall(fn) module_init(fn) #define core_initcall(fn) module_init(fn) #define core_initcall_sync(fn) module_init(fn) #define postcore_initcall(fn) module_init(fn) #define postcore_initcall_sync(fn) module_init(fn) #define arch_initcall(fn) module_init(fn) #define subsys_initcall(fn) module_init(fn) #define subsys_initcall_sync(fn) module_init(fn) #define fs_initcall(fn) module_init(fn) #define fs_initcall_sync(fn) module_init(fn) #define rootfs_initcall(fn) module_init(fn) #define device_initcall(fn) module_init(fn) #define device_initcall_sync(fn) module_init(fn) #define late_initcall(fn) module_init(fn) #define late_initcall_sync(fn) module_init(fn) #define console_initcall(fn) module_init(fn) /* Each module must use one module_init(). */ #define module_init(initfn) \ static inline initcall_t __maybe_unused __inittest(void) \ { return initfn; } \ int init_module(void) __copy(initfn) \ __attribute__((alias(#initfn))); \ ___ADDRESSABLE(init_module, __initdata); /* This is only required if you want to be unloadable. */ #define module_exit(exitfn) \ static inline exitcall_t __maybe_unused __exittest(void) \ { return exitfn; } \ void cleanup_module(void) __copy(exitfn) \ __attribute__((alias(#exitfn))); \ ___ADDRESSABLE(cleanup_module, __exitdata); #endif /* This means "can be init if no module support, otherwise module load may call it." */ #ifdef CONFIG_MODULES #define __init_or_module #define __initdata_or_module #define __initconst_or_module #define __INIT_OR_MODULE .text #define __INITDATA_OR_MODULE .data #define __INITRODATA_OR_MODULE .section ".rodata","a",%progbits #else #define __init_or_module __init #define __initdata_or_module __initdata #define __initconst_or_module __initconst #define __INIT_OR_MODULE __INIT #define __INITDATA_OR_MODULE __INITDATA #define __INITRODATA_OR_MODULE __INITRODATA #endif /*CONFIG_MODULES*/ /* Generic info of form tag = "info" */ #define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info) /* For userspace: you can also call me... */ #define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias) /* Soft module dependencies. See man modprobe.d for details. * Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz") */ #define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep) /* * Weak module dependencies. See man modprobe.d for details. * Example: MODULE_WEAKDEP("module-foo") */ #define MODULE_WEAKDEP(_weakdep) MODULE_INFO(weakdep, _weakdep) /* * MODULE_FILE is used for generating modules.builtin * So, make it no-op when this is being built as a module */ #ifdef MODULE #define MODULE_FILE #else #define MODULE_FILE MODULE_INFO(file, KBUILD_MODFILE); #endif /* * The following license idents are currently accepted as indicating free * software modules * * "GPL" [GNU Public License v2] * "GPL v2" [GNU Public License v2] * "GPL and additional rights" [GNU Public License v2 rights and more] * "Dual BSD/GPL" [GNU Public License v2 * or BSD license choice] * "Dual MIT/GPL" [GNU Public License v2 * or MIT license choice] * "Dual MPL/GPL" [GNU Public License v2 * or Mozilla license choice] * * The following other idents are available * * "Proprietary" [Non free products] * * Both "GPL v2" and "GPL" (the latter also in dual licensed strings) are * merely stating that the module is licensed under the GPL v2, but are not * telling whether "GPL v2 only" or "GPL v2 or later". The reason why there * are two variants is a historic and failed attempt to convey more * information in the MODULE_LICENSE string. 
For module loading the * "only/or later" distinction is completely irrelevant and does neither * replace the proper license identifiers in the corresponding source file * nor amends them in any way. The sole purpose is to make the * 'Proprietary' flagging work and to refuse to bind symbols which are * exported with EXPORT_SYMBOL_GPL when a non free module is loaded. * * In the same way "BSD" is not a clear license information. It merely * states, that the module is licensed under one of the compatible BSD * license variants. The detailed and correct license information is again * to be found in the corresponding source files. * * There are dual licensed components, but when running with Linux it is the * GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL * is a GPL combined work. * * This exists for several reasons * 1. So modinfo can show license info for users wanting to vet their setup * is free * 2. So the community can ignore bug reports including proprietary modules * 3. So vendors can do likewise based on their own policies */ #define MODULE_LICENSE(_license) MODULE_FILE MODULE_INFO(license, _license) /* * Author(s), use "Name <email>" or just "Name", for multiple * authors use multiple MODULE_AUTHOR() statements/lines. */ #define MODULE_AUTHOR(_author) MODULE_INFO(author, _author) /* What your module does. */ #define MODULE_DESCRIPTION(_description) MODULE_INFO(description, _description) #ifdef MODULE /* Creates an alias so file2alias.c can find device table. */ #define MODULE_DEVICE_TABLE(type, name) \ extern typeof(name) __mod_device_table__##type##__##name \ __attribute__ ((unused, alias(__stringify(name)))) #else /* !MODULE */ #define MODULE_DEVICE_TABLE(type, name) #endif /* Version of form [<epoch>:]<version>[-<extra-version>]. * Or for CVS/RCS ID version, everything but the number is stripped. * <epoch>: A (small) unsigned integer which allows you to start versions * anew. If not mentioned, it's zero. eg. "2:1.0" is after * "1:2.0". * <version>: The <version> may contain only alphanumerics and the * character `.'. Ordered by numeric sort for numeric parts, * ascii sort for ascii parts (as per RPM or DEB algorithm). * <extraversion>: Like <version>, but inserted for local * customizations, eg "rh3" or "rusty1". * Using this automatically adds a checksum of the .c files and the * local headers in "srcversion". */ #if defined(MODULE) || !defined(CONFIG_SYSFS) #define MODULE_VERSION(_version) MODULE_INFO(version, _version) #else #define MODULE_VERSION(_version) \ MODULE_INFO(version, _version); \ static struct module_version_attribute __modver_attr \ __used __section("__modver") \ __aligned(__alignof__(struct module_version_attribute)) \ = { \ .mattr = { \ .attr = { \ .name = "version", \ .mode = S_IRUGO, \ }, \ .show = __modver_version_show, \ }, \ .module_name = KBUILD_MODNAME, \ .version = _version, \ } #endif /* Optional firmware file (or files) needed by the module * format is simply firmware file name. Multiple firmware * files require multiple MODULE_FIRMWARE() specifiers */ #define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware) #define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, ns) struct notifier_block; #ifdef CONFIG_MODULES extern int modules_disabled; /* for sysctl */ /* Get/put a kernel symbol (calls must be symmetric) */ void *__symbol_get(const char *symbol); void *__symbol_get_gpl(const char *symbol); #define symbol_get(x) ((typeof(&x))(__symbol_get(__stringify(x)))) /* modules using other modules: kdb wants to see this. 
*/ struct module_use { struct list_head source_list; struct list_head target_list; struct module *source, *target; }; enum module_state { MODULE_STATE_LIVE, /* Normal state. */ MODULE_STATE_COMING, /* Full formed, running module_init. */ MODULE_STATE_GOING, /* Going away. */ MODULE_STATE_UNFORMED, /* Still setting it up. */ }; struct mod_tree_node { struct module *mod; struct latch_tree_node node; }; enum mod_mem_type { MOD_TEXT = 0, MOD_DATA, MOD_RODATA, MOD_RO_AFTER_INIT, MOD_INIT_TEXT, MOD_INIT_DATA, MOD_INIT_RODATA, MOD_MEM_NUM_TYPES, MOD_INVALID = -1, }; #define mod_mem_type_is_init(type) \ ((type) == MOD_INIT_TEXT || \ (type) == MOD_INIT_DATA || \ (type) == MOD_INIT_RODATA) #define mod_mem_type_is_core(type) (!mod_mem_type_is_init(type)) #define mod_mem_type_is_text(type) \ ((type) == MOD_TEXT || \ (type) == MOD_INIT_TEXT) #define mod_mem_type_is_data(type) (!mod_mem_type_is_text(type)) #define mod_mem_type_is_core_data(type) \ (mod_mem_type_is_core(type) && \ mod_mem_type_is_data(type)) #define for_each_mod_mem_type(type) \ for (enum mod_mem_type (type) = 0; \ (type) < MOD_MEM_NUM_TYPES; (type)++) #define for_class_mod_mem_type(type, class) \ for_each_mod_mem_type(type) \ if (mod_mem_type_is_##class(type)) struct module_memory { void *base; void *rw_copy; bool is_rox; unsigned int size; #ifdef CONFIG_MODULES_TREE_LOOKUP struct mod_tree_node mtn; #endif }; #ifdef CONFIG_MODULES_TREE_LOOKUP /* Only touch one cacheline for common rbtree-for-core-layout case. */ #define __module_memory_align ____cacheline_aligned #else #define __module_memory_align #endif struct mod_kallsyms { Elf_Sym *symtab; unsigned int num_symtab; char *strtab; char *typetab; }; #ifdef CONFIG_LIVEPATCH /** * struct klp_modinfo - ELF information preserved from the livepatch module * * @hdr: ELF header * @sechdrs: Section header table * @secstrings: String table for the section headers * @symndx: The symbol table section index */ struct klp_modinfo { Elf_Ehdr hdr; Elf_Shdr *sechdrs; char *secstrings; unsigned int symndx; }; #endif struct module { enum module_state state; /* Member of list of modules */ struct list_head list; /* Unique handle for this module */ char name[MODULE_NAME_LEN]; #ifdef CONFIG_STACKTRACE_BUILD_ID /* Module build ID */ unsigned char build_id[BUILD_ID_SIZE_MAX]; #endif /* Sysfs stuff. */ struct module_kobject mkobj; struct module_attribute *modinfo_attrs; const char *version; const char *srcversion; struct kobject *holders_dir; /* Exported symbols */ const struct kernel_symbol *syms; const s32 *crcs; unsigned int num_syms; #ifdef CONFIG_ARCH_USES_CFI_TRAPS s32 *kcfi_traps; s32 *kcfi_traps_end; #endif /* Kernel parameters. */ #ifdef CONFIG_SYSFS struct mutex param_lock; #endif struct kernel_param *kp; unsigned int num_kp; /* GPL-only exported symbols. */ unsigned int num_gpl_syms; const struct kernel_symbol *gpl_syms; const s32 *gpl_crcs; bool using_gplonly_symbols; #ifdef CONFIG_MODULE_SIG /* Signature was verified. */ bool sig_ok; #endif bool async_probe_requested; /* Exception table */ unsigned int num_exentries; struct exception_table_entry *extable; /* Startup function. 
*/ int (*init)(void); struct module_memory mem[MOD_MEM_NUM_TYPES] __module_memory_align; /* Arch-specific module values */ struct mod_arch_specific arch; unsigned long taints; /* same bits as kernel:taint_flags */ #ifdef CONFIG_GENERIC_BUG /* Support for BUG */ unsigned num_bugs; struct list_head bug_list; struct bug_entry *bug_table; #endif #ifdef CONFIG_KALLSYMS /* Protected by RCU and/or module_mutex: use rcu_dereference() */ struct mod_kallsyms __rcu *kallsyms; struct mod_kallsyms core_kallsyms; /* Section attributes */ struct module_sect_attrs *sect_attrs; /* Notes attributes */ struct module_notes_attrs *notes_attrs; #endif /* The command line arguments (may be mangled). People like keeping pointers to this stuff */ char *args; #ifdef CONFIG_SMP /* Per-cpu data. */ void __percpu *percpu; unsigned int percpu_size; #endif void *noinstr_text_start; unsigned int noinstr_text_size; #ifdef CONFIG_TRACEPOINTS unsigned int num_tracepoints; tracepoint_ptr_t *tracepoints_ptrs; #endif #ifdef CONFIG_TREE_SRCU unsigned int num_srcu_structs; struct srcu_struct **srcu_struct_ptrs; #endif #ifdef CONFIG_BPF_EVENTS unsigned int num_bpf_raw_events; struct bpf_raw_event_map *bpf_raw_events; #endif #ifdef CONFIG_DEBUG_INFO_BTF_MODULES unsigned int btf_data_size; unsigned int btf_base_data_size; void *btf_data; void *btf_base_data; #endif #ifdef CONFIG_JUMP_LABEL struct jump_entry *jump_entries; unsigned int num_jump_entries; #endif #ifdef CONFIG_TRACING unsigned int num_trace_bprintk_fmt; const char **trace_bprintk_fmt_start; #endif #ifdef CONFIG_EVENT_TRACING struct trace_event_call **trace_events; unsigned int num_trace_events; struct trace_eval_map **trace_evals; unsigned int num_trace_evals; #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD unsigned int num_ftrace_callsites; unsigned long *ftrace_callsites; #endif #ifdef CONFIG_KPROBES void *kprobes_text_start; unsigned int kprobes_text_size; unsigned long *kprobe_blacklist; unsigned int num_kprobe_blacklist; #endif #ifdef CONFIG_HAVE_STATIC_CALL_INLINE int num_static_call_sites; struct static_call_site *static_call_sites; #endif #if IS_ENABLED(CONFIG_KUNIT) int num_kunit_init_suites; struct kunit_suite **kunit_init_suites; int num_kunit_suites; struct kunit_suite **kunit_suites; #endif #ifdef CONFIG_LIVEPATCH bool klp; /* Is this a livepatch module? */ bool klp_alive; /* ELF information */ struct klp_modinfo *klp_info; #endif #ifdef CONFIG_PRINTK_INDEX unsigned int printk_index_size; struct pi_entry **printk_index_start; #endif #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? */ struct list_head source_list; /* What modules do I depend on? */ struct list_head target_list; /* Destruction function. */ void (*exit)(void); atomic_t refcnt; #endif #ifdef CONFIG_CONSTRUCTORS /* Constructor functions. */ ctor_fn_t *ctors; unsigned int num_ctors; #endif #ifdef CONFIG_FUNCTION_ERROR_INJECTION struct error_injection_entry *ei_funcs; unsigned int num_ei_funcs; #endif #ifdef CONFIG_DYNAMIC_DEBUG_CORE struct _ddebug_info dyndbg_info; #endif } ____cacheline_aligned __randomize_layout; #ifndef MODULE_ARCH_INIT #define MODULE_ARCH_INIT {} #endif #ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym) { return sym->st_value; } #endif /* FIXME: It'd be nice to isolate modules during init, too, so they aren't used before they (may) fail. 
But presently too much code (IDE & SCSI) require entry into the module during init.*/ static inline bool module_is_live(struct module *mod) { return mod->state != MODULE_STATE_GOING; } static inline bool module_is_coming(struct module *mod) { return mod->state == MODULE_STATE_COMING; } struct module *__module_text_address(unsigned long addr); struct module *__module_address(unsigned long addr); bool is_module_address(unsigned long addr); bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr); bool is_module_percpu_address(unsigned long addr); bool is_module_text_address(unsigned long addr); static inline bool within_module_mem_type(unsigned long addr, const struct module *mod, enum mod_mem_type type) { unsigned long base, size; base = (unsigned long)mod->mem[type].base; size = mod->mem[type].size; return addr - base < size; } static inline bool within_module_core(unsigned long addr, const struct module *mod) { for_class_mod_mem_type(type, core) { if (within_module_mem_type(addr, mod, type)) return true; } return false; } static inline bool within_module_init(unsigned long addr, const struct module *mod) { for_class_mod_mem_type(type, init) { if (within_module_mem_type(addr, mod, type)) return true; } return false; } static inline bool within_module(unsigned long addr, const struct module *mod) { return within_module_init(addr, mod) || within_module_core(addr, mod); } /* Search for module by name: must be in a RCU-sched critical section. */ struct module *find_module(const char *name); extern void __noreturn __module_put_and_kthread_exit(struct module *mod, long code); #define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code) #ifdef CONFIG_MODULE_UNLOAD int module_refcount(struct module *mod); void __symbol_put(const char *symbol); #define symbol_put(x) __symbol_put(__stringify(x)) void symbol_put_addr(void *addr); /* Sometimes we know we already have a refcount, and it's easier not to handle the error case (which only happens with rmmod --wait). */ extern void __module_get(struct module *module); /** * try_module_get() - take module refcount unless module is being removed * @module: the module we should check for * * Only try to get a module reference count if the module is not being removed. * This call will fail if the module is in the process of being removed. * * Care must also be taken to ensure the module exists and is alive prior to * usage of this call. This can be guaranteed through two means: * * 1) Direct protection: you know an earlier caller must have increased the * module reference through __module_get(). This can typically be achieved * by having another entity other than the module itself increment the * module reference count. * * 2) Implied protection: there is an implied protection against module * removal. An example of this is the implied protection used by kernfs / * sysfs. The sysfs store / read file operations are guaranteed to exist * through the use of kernfs's active reference (see kernfs_active()) and a * sysfs / kernfs file removal cannot happen unless the same file is not * active. Therefore, if a sysfs file is being read or written to, the module * which created it must still exist. It is therefore safe to use * try_module_get() on module sysfs store / read ops. * * One of the real values to try_module_get() is the module_is_live() check * which ensures that the caller of try_module_get() can yield to userspace * module removal requests and gracefully fail if the module is on its way out.
* * Returns true if the reference count was successfully incremented. */ extern bool try_module_get(struct module *module); /** * module_put() - release a reference count to a module * @module: the module we should release a reference count for * * If you successfully bump a reference count to a module with try_module_get(), * when you are finished you must call module_put() to release that reference * count. */ extern void module_put(struct module *module); #else /*!CONFIG_MODULE_UNLOAD*/ static inline bool try_module_get(struct module *module) { return !module || module_is_live(module); } static inline void module_put(struct module *module) { } static inline void __module_get(struct module *module) { } #define symbol_put(x) do { } while (0) #define symbol_put_addr(p) do { } while (0) #endif /* CONFIG_MODULE_UNLOAD */ /* This is a #define so the string doesn't get put in every .o file */ #define module_name(mod) \ ({ \ struct module *__mod = (mod); \ __mod ? __mod->name : "kernel"; \ }) /* Dereference module function descriptor */ void *dereference_module_function_descriptor(struct module *mod, void *ptr); int register_module_notifier(struct notifier_block *nb); int unregister_module_notifier(struct notifier_block *nb); extern void print_modules(void); static inline bool module_requested_async_probing(struct module *module) { return module && module->async_probe_requested; } static inline bool is_livepatch_module(struct module *mod) { #ifdef CONFIG_LIVEPATCH return mod->klp; #else return false; #endif } void set_module_sig_enforced(void); void *__module_writable_address(struct module *mod, void *loc); static inline void *module_writable_address(struct module *mod, void *loc) { if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) || !mod || mod->state != MODULE_STATE_UNFORMED) return loc; return __module_writable_address(mod, loc); } #else /* !CONFIG_MODULES... 
*/ static inline struct module *__module_address(unsigned long addr) { return NULL; } static inline struct module *__module_text_address(unsigned long addr) { return NULL; } static inline bool is_module_address(unsigned long addr) { return false; } static inline bool is_module_percpu_address(unsigned long addr) { return false; } static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) { return false; } static inline bool is_module_text_address(unsigned long addr) { return false; } static inline bool within_module_core(unsigned long addr, const struct module *mod) { return false; } static inline bool within_module_init(unsigned long addr, const struct module *mod) { return false; } static inline bool within_module(unsigned long addr, const struct module *mod) { return false; } /* Get/put a kernel symbol (calls should be symmetric) */ #define symbol_get(x) ({ extern typeof(x) x __attribute__((weak,visibility("hidden"))); &(x); }) #define symbol_put(x) do { } while (0) #define symbol_put_addr(x) do { } while (0) static inline void __module_get(struct module *module) { } static inline bool try_module_get(struct module *module) { return true; } static inline void module_put(struct module *module) { } #define module_name(mod) "kernel" static inline int register_module_notifier(struct notifier_block *nb) { /* no events will happen anyway, so this can always succeed */ return 0; } static inline int unregister_module_notifier(struct notifier_block *nb) { return 0; } #define module_put_and_kthread_exit(code) kthread_exit(code) static inline void print_modules(void) { } static inline bool module_requested_async_probing(struct module *module) { return false; } static inline void set_module_sig_enforced(void) { } /* Dereference module function descriptor */ static inline void *dereference_module_function_descriptor(struct module *mod, void *ptr) { return ptr; } static inline bool module_is_coming(struct module *mod) { return false; } static inline void *module_writable_address(struct module *mod, void *loc) { return loc; } #endif /* CONFIG_MODULES */ #ifdef CONFIG_SYSFS extern struct kset *module_kset; extern const struct kobj_type module_ktype; #endif /* CONFIG_SYSFS */ #define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x) /* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */ #define __MODULE_STRING(x) __stringify(x) #ifdef CONFIG_GENERIC_BUG void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, struct module *); void module_bug_cleanup(struct module *); #else /* !CONFIG_GENERIC_BUG */ static inline void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { } static inline void module_bug_cleanup(struct module *mod) {} #endif /* CONFIG_GENERIC_BUG */ #ifdef CONFIG_MITIGATION_RETPOLINE extern bool retpoline_module_ok(bool has_retpoline); #else static inline bool retpoline_module_ok(bool has_retpoline) { return true; } #endif #ifdef CONFIG_MODULE_SIG bool is_module_sig_enforced(void); static inline bool module_sig_ok(struct module *module) { return module->sig_ok; } #else /* !CONFIG_MODULE_SIG */ static inline bool is_module_sig_enforced(void) { return false; } static inline bool module_sig_ok(struct module *module) { return true; } #endif /* CONFIG_MODULE_SIG */ #if defined(CONFIG_MODULES) && defined(CONFIG_KALLSYMS) int module_kallsyms_on_each_symbol(const char *modname, int (*fn)(void *, const char *, unsigned long), void *data); /* For kallsyms to ask for address resolution. 
namebuf should be at * least KSYM_NAME_LEN long: a pointer to namebuf is returned if * found, otherwise NULL. */ int module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, const unsigned char **modbuildid, char *namebuf); int lookup_module_symbol_name(unsigned long addr, char *symname); int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if * symnum out of range. */ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported); /* Look for this name: can be of form module:name. */ unsigned long module_kallsyms_lookup_name(const char *name); unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name); #else /* CONFIG_MODULES && CONFIG_KALLSYMS */ static inline int module_kallsyms_on_each_symbol(const char *modname, int (*fn)(void *, const char *, unsigned long), void *data) { return -EOPNOTSUPP; } /* For kallsyms to ask for address resolution. NULL means not found. */ static inline int module_address_lookup(unsigned long addr, unsigned long *symbolsize, unsigned long *offset, char **modname, const unsigned char **modbuildid, char *namebuf) { return 0; } static inline int lookup_module_symbol_name(unsigned long addr, char *symname) { return -ERANGE; } static inline int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, char *name, char *module_name, int *exported) { return -ERANGE; } static inline unsigned long module_kallsyms_lookup_name(const char *name) { return 0; } static inline unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) { return 0; } #endif /* CONFIG_MODULES && CONFIG_KALLSYMS */ #endif /* _LINUX_MODULE_H */
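
A short usage sketch follows (an illustration, not part of the header above): the common pattern of pinning the module that owns a callback table for the duration of a call. The example_ops structure and its do_work callback are hypothetical stand-ins for the usual owner-field idiom; the try_module_get()/module_put() pairing is the part the header guarantees.

#include <linux/errno.h>
#include <linux/module.h>

/* Hypothetical ops table; the owner field follows the common kernel idiom. */
struct example_ops {
	struct module *owner;
	int (*do_work)(void);
};

static int example_call(const struct example_ops *ops)
{
	int ret;

	/* Fails (returns false) while the owning module is being unloaded. */
	if (!try_module_get(ops->owner))
		return -ENODEV;

	ret = ops->do_work();

	/* Every successful try_module_get() must be balanced by module_put(). */
	module_put(ops->owner);
	return ret;
}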
/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This code is free software; you can redistribute it and/or modify
 * it under the terms of the GNU LESSER GENERAL PUBLIC LICENSE
 * version 2.1 as published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU LESSER GENERAL PUBLIC LICENSE for more details.
 *
 */

#ifndef mISDNIF_H
#define mISDNIF_H

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/socket.h>

/*
 * ABI Version 32 bit
 *
 * <8 bit> Major version
 *		- changed if any interface becomes backwards incompatible
 *
 * <8 bit> Minor version
 *		- changed if any interface is extended but backwards compatible
 *
 * <16 bit> Release number
 *		- should be incremented on every checkin
 */
#define MISDN_MAJOR_VERSION	1
#define MISDN_MINOR_VERSION	1
#define MISDN_RELEASE		29

/* primitives for information exchange
 * general format
 * <16  bit  0 >
 * <8  bit command>
 *    BIT 8 = 1 LAYER private
 *    BIT 7 = 1 answer
 *    BIT 6 = 1 DATA
 * <8  bit target layer mask>
 *
 * Layer = 00 is reserved for general commands
 * Layer = 01  L2 -> HW
 * Layer = 02  HW -> L2
 * Layer = 04  L3 -> L2
 * Layer = 08  L2 -> L3
 * Layer = FF is reserved for broadcast commands
 */

#define MISDN_CMDMASK		0xff00
#define MISDN_LAYERMASK		0x00ff

/* general commands */
#define OPEN_CHANNEL		0x0100
#define CLOSE_CHANNEL		0x0200
#define CONTROL_CHANNEL		0x0300
#define CHECK_DATA		0x0400

/* layer 2 -> layer 1 */
#define PH_ACTIVATE_REQ		0x0101
#define PH_DEACTIVATE_REQ	0x0201
#define PH_DATA_REQ		0x2001
#define MPH_ACTIVATE_REQ	0x0501
#define MPH_DEACTIVATE_REQ	0x0601
#define MPH_INFORMATION_REQ	0x0701
#define PH_CONTROL_REQ		0x0801

/* layer 1 -> layer 2 */
#define PH_ACTIVATE_IND		0x0102
#define PH_ACTIVATE_CNF		0x4102
#define PH_DEACTIVATE_IND	0x0202
#define PH_DEACTIVATE_CNF	0x4202
#define PH_DATA_IND		0x2002
#define PH_DATA_E_IND		0x3002
#define MPH_ACTIVATE_IND	0x0502
#define MPH_DEACTIVATE_IND	0x0602
#define MPH_INFORMATION_IND	0x0702
#define PH_DATA_CNF		0x6002
#define PH_CONTROL_IND		0x0802
#define PH_CONTROL_CNF		0x4802

/* layer 3 -> layer 2 */
#define DL_ESTABLISH_REQ	0x1004
#define DL_RELEASE_REQ		0x1104
#define DL_DATA_REQ		0x3004
#define DL_UNITDATA_REQ		0x3104
#define DL_INFORMATION_REQ	0x0004

/* layer 2 -> layer 3 */
#define DL_ESTABLISH_IND	0x1008
#define DL_ESTABLISH_CNF	0x5008
#define DL_RELEASE_IND		0x1108
#define DL_RELEASE_CNF		0x5108
#define DL_DATA_IND		0x3008
#define DL_UNITDATA_IND		0x3108
#define DL_INFORMATION_IND	0x0008

/* intern layer 2 management */
#define MDL_ASSIGN_REQ		0x1804
#define MDL_ASSIGN_IND		0x1904
#define MDL_REMOVE_REQ		0x1A04
#define MDL_REMOVE_IND		0x1B04
#define MDL_STATUS_UP_IND	0x1C04
#define MDL_STATUS_DOWN_IND	0x1D04
#define MDL_STATUS_UI_IND	0x1E04
#define MDL_ERROR_IND		0x1F04
#define MDL_ERROR_RSP		0x5F04

/* intern layer 2 */
#define DL_TIMER200_IND		0x7004
#define DL_TIMER203_IND		0x7304
#define DL_INTERN_MSG		0x7804

/* DL_INFORMATION_IND types */
#define DL_INFO_L2_CONNECT	0x0001
#define DL_INFO_L2_REMOVED	0x0002

/* PH_CONTROL types */
/* TOUCH TONE IS 0x20XX  XX "0"..."9", "A","B","C","D","*","#" */
#define DTMF_TONE_VAL		0x2000
#define DTMF_TONE_MASK		0x007F
#define DTMF_TONE_START		0x2100
#define DTMF_TONE_STOP		0x2200
#define DTMF_HFC_COEF		0x4000
#define DSP_CONF_JOIN		0x2403
#define DSP_CONF_SPLIT		0x2404
#define DSP_RECEIVE_OFF		0x2405
#define DSP_RECEIVE_ON		0x2406
#define DSP_ECHO_ON		0x2407
#define DSP_ECHO_OFF		0x2408
#define DSP_MIX_ON		0x2409
#define DSP_MIX_OFF		0x240a
#define DSP_DELAY		0x240b
#define DSP_JITTER		0x240c
#define DSP_TXDATA_ON		0x240d
#define DSP_TXDATA_OFF		0x240e
#define DSP_TX_DEJITTER		0x240f
#define DSP_TX_DEJ_OFF		0x2410
#define DSP_TONE_PATT_ON	0x2411
#define DSP_TONE_PATT_OFF	0x2412
#define DSP_VOL_CHANGE_TX	0x2413
#define DSP_VOL_CHANGE_RX	0x2414
#define DSP_BF_ENABLE_KEY	0x2415
#define DSP_BF_DISABLE		0x2416
#define DSP_BF_ACCEPT		0x2416
#define DSP_BF_REJECT		0x2417
#define DSP_PIPELINE_CFG	0x2418
#define HFC_VOL_CHANGE_TX	0x2601
#define HFC_VOL_CHANGE_RX	0x2602
#define HFC_SPL_LOOP_ON		0x2603
#define HFC_SPL_LOOP_OFF	0x2604

/* for T30 FAX and analog modem */
#define HW_MOD_FRM		0x4000
#define HW_MOD_FRH		0x4001
#define HW_MOD_FTM		0x4002
#define HW_MOD_FTH		0x4003
#define HW_MOD_FTS		0x4004
#define HW_MOD_CONNECT		0x4010
#define HW_MOD_OK		0x4011
#define HW_MOD_NOCARR		0x4012
#define HW_MOD_FCERROR		0x4013
#define HW_MOD_READY		0x4014
#define HW_MOD_LASTDATA		0x4015

/* DSP_TONE_PATT_ON parameter */
#define TONE_OFF			0x0000
#define TONE_GERMAN_DIALTONE		0x0001
#define TONE_GERMAN_OLDDIALTONE		0x0002
#define TONE_AMERICAN_DIALTONE		0x0003
#define TONE_GERMAN_DIALPBX		0x0004
#define TONE_GERMAN_OLDDIALPBX		0x0005
#define TONE_AMERICAN_DIALPBX		0x0006
#define TONE_GERMAN_RINGING		0x0007
#define TONE_GERMAN_OLDRINGING		0x0008
#define TONE_AMERICAN_RINGPBX		0x000b
#define TONE_GERMAN_RINGPBX		0x000c
#define TONE_GERMAN_OLDRINGPBX		0x000d
#define TONE_AMERICAN_RINGING		0x000e
#define TONE_GERMAN_BUSY		0x000f
#define TONE_GERMAN_OLDBUSY		0x0010
#define TONE_AMERICAN_BUSY		0x0011
#define TONE_GERMAN_HANGUP		0x0012
#define TONE_GERMAN_OLDHANGUP		0x0013
#define TONE_AMERICAN_HANGUP		0x0014
#define TONE_SPECIAL_INFO		0x0015
#define TONE_GERMAN_GASSENBESETZT	0x0016
#define TONE_GERMAN_AUFSCHALTTON	0x0016

/* MPH_INFORMATION_IND */
#define L1_SIGNAL_LOS_OFF	0x0010
#define L1_SIGNAL_LOS_ON	0x0011
#define L1_SIGNAL_AIS_OFF	0x0012
#define L1_SIGNAL_AIS_ON	0x0013
#define L1_SIGNAL_RDI_OFF	0x0014
#define L1_SIGNAL_RDI_ON	0x0015
#define L1_SIGNAL_SLIP_RX	0x0020
#define L1_SIGNAL_SLIP_TX	0x0021

/*
 * protocol ids
 * D channel 1-31
 * B channel 33 - 63
 */

#define ISDN_P_NONE		0
#define ISDN_P_BASE		0
#define ISDN_P_TE_S0		0x01
#define ISDN_P_NT_S0		0x02
#define ISDN_P_TE_E1		0x03
#define ISDN_P_NT_E1		0x04
#define ISDN_P_TE_UP0		0x05
#define ISDN_P_NT_UP0		0x06

#define IS_ISDN_P_TE(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_TE_E1) || \
			 (p == ISDN_P_TE_UP0) || (p == ISDN_P_LAPD_TE))
#define IS_ISDN_P_NT(p) ((p == ISDN_P_NT_S0) || (p == ISDN_P_NT_E1) || \
			 (p == ISDN_P_NT_UP0) || (p == ISDN_P_LAPD_NT))
#define IS_ISDN_P_S0(p) ((p == ISDN_P_TE_S0) || (p == ISDN_P_NT_S0))
#define IS_ISDN_P_E1(p) ((p == ISDN_P_TE_E1) || (p == ISDN_P_NT_E1))
#define IS_ISDN_P_UP0(p) ((p == ISDN_P_TE_UP0) || (p == ISDN_P_NT_UP0))

#define ISDN_P_LAPD_TE		0x10
#define ISDN_P_LAPD_NT		0x11

#define ISDN_P_B_MASK		0x1f
#define ISDN_P_B_START		0x20

#define ISDN_P_B_RAW		0x21
#define ISDN_P_B_HDLC		0x22
#define ISDN_P_B_X75SLP		0x23
#define ISDN_P_B_L2DTMF		0x24
#define ISDN_P_B_L2DSP		0x25
#define ISDN_P_B_L2DSPHDLC	0x26
#define ISDN_P_B_T30_FAX	0x27
#define ISDN_P_B_MODEM_ASYNC	0x28

#define OPTION_L2_PMX		1
#define OPTION_L2_PTP		2
#define OPTION_L2_FIXEDTEI	3
#define OPTION_L2_CLEANUP	4
#define OPTION_L1_HOLD		5

/* should be in sync with linux/kobject.h:KOBJ_NAME_LEN */
#define MISDN_MAX_IDLEN		20

struct mISDNhead {
	unsigned int	prim;
	unsigned int	id;
} __packed;

#define MISDN_HEADER_LEN	sizeof(struct mISDNhead)
#define MAX_DATA_SIZE		2048
#define MAX_DATA_MEM		(MAX_DATA_SIZE + MISDN_HEADER_LEN)
#define MAX_DFRAME_LEN		260

#define MISDN_ID_ADDR_MASK	0xFFFF
#define MISDN_ID_TEI_MASK	0xFF00
#define MISDN_ID_SAPI_MASK	0x00FF
#define MISDN_ID_TEI_ANY	0x7F00

#define MISDN_ID_ANY		0xFFFF
#define MISDN_ID_NONE		0xFFFE

#define GROUP_TEI		127
#define TEI_SAPI		63
#define CTRL_SAPI		0

#define MISDN_MAX_CHANNEL	127
#define MISDN_CHMAP_SIZE	((MISDN_MAX_CHANNEL + 1) >> 3)

#define SOL_MISDN	0

struct sockaddr_mISDN {
	sa_family_t	family;
	unsigned char	dev;
	unsigned char	channel;
	unsigned char	sapi;
	unsigned char	tei;
};
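
As a usage sketch of the address structure just defined (illustrative only; the device number, the SOCK_DGRAM type for LAPD protocols, and the minimal error handling are assumptions about mISDN's socket layer): a userspace program creates a PF_ISDN socket with the desired protocol and binds it via struct sockaddr_mISDN.

#include <sys/socket.h>
#include <unistd.h>

/* Sketch: open and bind a LAPD (TE mode) socket on mISDN device 0. */
static int open_lapd_te(unsigned char device)
{
	struct sockaddr_mISDN addr = {
		.family  = AF_ISDN,
		.dev     = device,	/* mISDN device id */
		.channel = 0,		/* D channel */
		.sapi    = CTRL_SAPI,
		.tei     = 0,
	};
	int sk = socket(PF_ISDN, SOCK_DGRAM, ISDN_P_LAPD_TE);

	if (sk < 0)
		return -1;
	if (bind(sk, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(sk);
		return -1;
	}
	return sk;
}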
struct mISDNversion {
	unsigned char	major;
	unsigned char	minor;
	unsigned short	release;
};

struct mISDN_devinfo {
	u_int	id;
	u_int	Dprotocols;
	u_int	Bprotocols;
	u_int	protocol;
	u_char	channelmap[MISDN_CHMAP_SIZE];
	u_int	nrbchan;
	char	name[MISDN_MAX_IDLEN];
};

struct mISDN_devrename {
	u_int	id;
	char	name[MISDN_MAX_IDLEN];	/* new name */
};

/* MPH_INFORMATION_REQ payload */
struct ph_info_ch {
	__u32 protocol;
	__u64 Flags;
};

struct ph_info_dch {
	struct ph_info_ch ch;
	__u16 state;
	__u16 num_bch;
};

struct ph_info {
	struct ph_info_dch dch;
	struct ph_info_ch bch[];
};

/* timer device ioctl */
#define IMADDTIMER	_IOR('I', 64, int)
#define IMDELTIMER	_IOR('I', 65, int)

/* socket ioctls */
#define IMGETVERSION	_IOR('I', 66, int)
#define IMGETCOUNT	_IOR('I', 67, int)
#define IMGETDEVINFO	_IOR('I', 68, int)
#define IMCTRLREQ	_IOR('I', 69, int)
#define IMCLEAR_L2	_IOR('I', 70, int)
#define IMSETDEVNAME	_IOR('I', 71, struct mISDN_devrename)
#define IMHOLD_L1	_IOR('I', 72, int)

static inline int
test_channelmap(u_int nr, u_char *map)
{
	if (nr <= MISDN_MAX_CHANNEL)
		return map[nr >> 3] & (1 << (nr & 7));
	else
		return 0;
}

static inline void
set_channelmap(u_int nr, u_char *map)
{
	map[nr >> 3] |= (1 << (nr & 7));
}

static inline void
clear_channelmap(u_int nr, u_char *map)
{
	map[nr >> 3] &= ~(1 << (nr & 7));
}

/* CONTROL_CHANNEL parameters */
#define MISDN_CTRL_GETOP		0x0000
#define MISDN_CTRL_LOOP			0x0001
#define MISDN_CTRL_CONNECT		0x0002
#define MISDN_CTRL_DISCONNECT		0x0004
#define MISDN_CTRL_RX_BUFFER		0x0008
#define MISDN_CTRL_PCMCONNECT		0x0010
#define MISDN_CTRL_PCMDISCONNECT	0x0020
#define MISDN_CTRL_SETPEER		0x0040
#define MISDN_CTRL_UNSETPEER		0x0080
#define MISDN_CTRL_RX_OFF		0x0100
#define MISDN_CTRL_FILL_EMPTY		0x0200
#define MISDN_CTRL_GETPEER		0x0400
#define MISDN_CTRL_L1_TIMER3		0x0800
#define MISDN_CTRL_HW_FEATURES_OP	0x2000
#define MISDN_CTRL_HW_FEATURES		0x2001
#define MISDN_CTRL_HFC_OP		0x4000
#define MISDN_CTRL_HFC_PCM_CONN		0x4001
#define MISDN_CTRL_HFC_PCM_DISC		0x4002
#define MISDN_CTRL_HFC_CONF_JOIN	0x4003
#define MISDN_CTRL_HFC_CONF_SPLIT	0x4004
#define MISDN_CTRL_HFC_RECEIVE_OFF	0x4005
#define MISDN_CTRL_HFC_RECEIVE_ON	0x4006
#define MISDN_CTRL_HFC_ECHOCAN_ON	0x4007
#define MISDN_CTRL_HFC_ECHOCAN_OFF	0x4008
#define MISDN_CTRL_HFC_WD_INIT		0x4009
#define MISDN_CTRL_HFC_WD_RESET		0x400A

/* special RX buffer value for MISDN_CTRL_RX_BUFFER: request.p1 is the minimum
 * buffer size, request.p2 the maximum. Using MISDN_CTRL_RX_SIZE_IGNORE will
 * not change the value, but still read back the actual setting.
*/ #define MISDN_CTRL_RX_SIZE_IGNORE -1 /* socket options */ #define MISDN_TIME_STAMP 0x0001 struct mISDN_ctrl_req { int op; int channel; int p1; int p2; }; /* muxer options */ #define MISDN_OPT_ALL 1 #define MISDN_OPT_TEIMGR 2 #ifdef __KERNEL__ #include <linux/list.h> #include <linux/skbuff.h> #include <linux/net.h> #include <net/sock.h> #include <linux/completion.h> #define DEBUG_CORE 0x000000ff #define DEBUG_CORE_FUNC 0x00000002 #define DEBUG_SOCKET 0x00000004 #define DEBUG_MANAGER 0x00000008 #define DEBUG_SEND_ERR 0x00000010 #define DEBUG_MSG_THREAD 0x00000020 #define DEBUG_QUEUE_FUNC 0x00000040 #define DEBUG_L1 0x0000ff00 #define DEBUG_L1_FSM 0x00000200 #define DEBUG_L2 0x00ff0000 #define DEBUG_L2_FSM 0x00020000 #define DEBUG_L2_CTRL 0x00040000 #define DEBUG_L2_RECV 0x00080000 #define DEBUG_L2_TEI 0x00100000 #define DEBUG_L2_TEIFSM 0x00200000 #define DEBUG_TIMER 0x01000000 #define DEBUG_CLOCK 0x02000000 #define mISDN_HEAD_P(s) ((struct mISDNhead *)&s->cb[0]) #define mISDN_HEAD_PRIM(s) (((struct mISDNhead *)&s->cb[0])->prim) #define mISDN_HEAD_ID(s) (((struct mISDNhead *)&s->cb[0])->id) /* socket states */ #define MISDN_OPEN 1 #define MISDN_BOUND 2 #define MISDN_CLOSED 3 struct mISDNchannel; struct mISDNdevice; struct mISDNstack; struct mISDNclock; struct channel_req { u_int protocol; struct sockaddr_mISDN adr; struct mISDNchannel *ch; }; typedef int (ctrl_func_t)(struct mISDNchannel *, u_int, void *); typedef int (send_func_t)(struct mISDNchannel *, struct sk_buff *); typedef int (create_func_t)(struct channel_req *); struct Bprotocol { struct list_head list; char *name; u_int Bprotocols; create_func_t *create; }; struct mISDNchannel { struct list_head list; u_int protocol; u_int nr; u_long opt; u_int addr; struct mISDNstack *st; struct mISDNchannel *peer; send_func_t *send; send_func_t *recv; ctrl_func_t *ctrl; }; struct mISDN_sock_list { struct hlist_head head; rwlock_t lock; }; struct mISDN_sock { struct sock sk; struct mISDNchannel ch; u_int cmask; struct mISDNdevice *dev; }; struct mISDNdevice { struct mISDNchannel D; u_int id; u_int Dprotocols; u_int Bprotocols; u_int nrbchan; u_char channelmap[MISDN_CHMAP_SIZE]; struct list_head bchannels; struct mISDNchannel *teimgr; struct device dev; }; struct mISDNstack { u_long status; struct mISDNdevice *dev; struct task_struct *thread; struct completion *notify; wait_queue_head_t workq; struct sk_buff_head msgq; struct list_head layer2; struct mISDNchannel *layer1; struct mISDNchannel own; struct mutex lmutex; /* protect lists */ struct mISDN_sock_list l1sock; #ifdef MISDN_MSG_STATS u_int msg_cnt; u_int sleep_cnt; u_int stopped_cnt; #endif }; typedef int (clockctl_func_t)(void *, int); struct mISDNclock { struct list_head list; char name[64]; int pri; clockctl_func_t *ctl; void *priv; }; /* global alloc/queue functions */ static inline struct sk_buff * mI_alloc_skb(unsigned int len, gfp_t gfp_mask) { struct sk_buff *skb; skb = alloc_skb(len + MISDN_HEADER_LEN, gfp_mask); if (likely(skb)) skb_reserve(skb, MISDN_HEADER_LEN); return skb; } static inline struct sk_buff * _alloc_mISDN_skb(u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) { struct sk_buff *skb = mI_alloc_skb(len, gfp_mask); struct mISDNhead *hh; if (!skb) return NULL; if (len) skb_put_data(skb, dp, len); hh = mISDN_HEAD_P(skb); hh->prim = prim; hh->id = id; return skb; } static inline void _queue_data(struct mISDNchannel *ch, u_int prim, u_int id, u_int len, void *dp, gfp_t gfp_mask) { struct sk_buff *skb; if (!ch->peer) return; skb = _alloc_mISDN_skb(prim, id, len, 
dp, gfp_mask); if (!skb) return; if (ch->recv(ch->peer, skb)) dev_kfree_skb(skb); } /* global register/unregister functions */ extern int mISDN_register_device(struct mISDNdevice *, struct device *parent, char *name); extern void mISDN_unregister_device(struct mISDNdevice *); extern int mISDN_register_Bprotocol(struct Bprotocol *); extern void mISDN_unregister_Bprotocol(struct Bprotocol *); extern struct mISDNclock *mISDN_register_clock(char *, int, clockctl_func_t *, void *); extern void mISDN_unregister_clock(struct mISDNclock *); static inline struct mISDNdevice *dev_to_mISDN(const struct device *dev) { if (dev) return dev_get_drvdata(dev); else return NULL; } extern void set_channel_address(struct mISDNchannel *, u_int, u_int); extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *); extern unsigned short mISDN_clock_get(void); extern const char *mISDNDevName4ch(struct mISDNchannel *); #endif /* __KERNEL__ */ #endif /* mISDNIF_H */
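
A kernel-side sketch of the queueing helpers above (the driver context and the choice of message id are assumptions): delivering a received frame upstream as PH_DATA_IND via _queue_data(), which prepends the struct mISDNhead and hands the skb to the channel's recv callback on its peer.

/* Sketch: pass a received payload up the stack as PH_DATA_IND.
 * "ch" is the driver's mISDN channel; the id is driver-chosen (0 here). */
static void example_rx_frame(struct mISDNchannel *ch,
			     void *buf, unsigned int len)
{
	if (len > MAX_DATA_SIZE)
		return;		/* oversized frame; a real driver would log this */
	_queue_data(ch, PH_DATA_IND, 0, len, buf, GFP_ATOMIC);
}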
// SPDX-License-Identifier: GPL-2.0-only
/* Xtables module to match packets using a BPF filter.
 * Copyright 2013 Google Inc.
 * Written by Willem de Bruijn <willemb@google.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/netfilter/xt_bpf.h>
#include <linux/netfilter/x_tables.h>

MODULE_AUTHOR("Willem de Bruijn <willemb@google.com>");
MODULE_DESCRIPTION("Xtables: BPF filter match");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_bpf");
MODULE_ALIAS("ip6t_bpf");

static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
				   struct bpf_prog **ret)
{
	struct sock_fprog_kern program;

	if (len > XT_BPF_MAX_NUM_INSTR)
		return -EINVAL;

	program.len = len;
	program.filter = insns;

	if (bpf_prog_create(ret, &program)) {
		pr_info_ratelimited("check failed: parse error\n");
		return -EINVAL;
	}

	return 0;
}

static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	*ret = prog;
	return 0;
}

static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret)
{
	if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX)
		return -EINVAL;

	*ret = bpf_prog_get_type_path(path, BPF_PROG_TYPE_SOCKET_FILTER);
	return PTR_ERR_OR_ZERO(*ret);
}

static int bpf_mt_check(const struct xt_mtchk_param *par)
{
	struct xt_bpf_info *info = par->matchinfo;

	return __bpf_mt_check_bytecode(info->bpf_program,
				       info->bpf_program_num_elem,
				       &info->filter);
}

static int bpf_mt_check_v1(const struct xt_mtchk_param *par)
{
	struct xt_bpf_info_v1 *info = par->matchinfo;

	if (info->mode == XT_BPF_MODE_BYTECODE)
		return __bpf_mt_check_bytecode(info->bpf_program,
					       info->bpf_program_num_elem,
					       &info->filter);
	else if (info->mode == XT_BPF_MODE_FD_ELF)
		return __bpf_mt_check_fd(info->fd, &info->filter);
	else if (info->mode == XT_BPF_MODE_PATH_PINNED)
		return __bpf_mt_check_path(info->path, &info->filter);
	else
		return -EINVAL;
}

static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_bpf_info *info = par->matchinfo;

	return bpf_prog_run(info->filter, skb);
}

static bool bpf_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_bpf_info_v1 *info = par->matchinfo;

	return !!bpf_prog_run_save_cb(info->filter, (struct sk_buff *) skb);
}

static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_bpf_info *info = par->matchinfo;

	bpf_prog_destroy(info->filter);
}

static void bpf_mt_destroy_v1(const struct xt_mtdtor_param *par)
{
	const struct xt_bpf_info_v1 *info = par->matchinfo;

	bpf_prog_destroy(info->filter);
}

static struct xt_match bpf_mt_reg[] __read_mostly = {
	{
		.name		= "bpf",
		.revision	= 0,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= bpf_mt_check,
		.match		= bpf_mt,
		.destroy	= bpf_mt_destroy,
		.matchsize	= sizeof(struct xt_bpf_info),
		.usersize	= offsetof(struct xt_bpf_info, filter),
		.me		= THIS_MODULE,
	},
	{
		.name		= "bpf",
		.revision	= 1,
		.family		= NFPROTO_UNSPEC,
		.checkentry	= bpf_mt_check_v1,
		.match		= bpf_mt_v1,
		.destroy	= bpf_mt_destroy_v1,
		.matchsize	= sizeof(struct xt_bpf_info_v1),
		.usersize	= offsetof(struct xt_bpf_info_v1, filter),
		.me		= THIS_MODULE,
	},
};

static int __init bpf_mt_init(void)
{
	return xt_register_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}

static void __exit bpf_mt_exit(void)
{
	xt_unregister_matches(bpf_mt_reg, ARRAY_SIZE(bpf_mt_reg));
}

module_init(bpf_mt_init);
module_exit(bpf_mt_exit);
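
To make the revision-0 interface concrete, here is a sketch of the matchinfo that userspace (e.g. iptables -m bpf in bytecode mode) hands to bpf_mt_check(): a one-instruction cBPF program that returns a nonzero length and therefore accepts every packet. The helper name is hypothetical; the field layout follows linux/netfilter/xt_bpf.h.

#include <linux/filter.h>
#include <linux/netfilter/xt_bpf.h>

/* Sketch: fill a revision-0 match payload with a one-instruction
 * "accept all" cBPF program; bpf_mt_check() translates it via
 * bpf_prog_create(). One instruction is well under XT_BPF_MAX_NUM_INSTR. */
static void example_fill_bpf_match(struct xt_bpf_info *info)
{
	const struct sock_filter accept_all = {
		.code	= BPF_RET | BPF_K,
		.jt	= 0,
		.jf	= 0,
		.k	= 0xffffffff,	/* nonzero return value == match */
	};

	info->bpf_program_num_elem = 1;
	info->bpf_program[0] = accept_all;
}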
// SPDX-License-Identifier: GPL-2.0+
/*
 * ext4_jbd2.h
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
 *
 * Copyright 1998--1999 Red Hat corp --- All Rights Reserved
 *
 * Ext4-specific journaling extensions.
 */

#ifndef _EXT4_JBD2_H
#define _EXT4_JBD2_H

#include <linux/fs.h>
#include <linux/jbd2.h>
#include "ext4.h"

#define EXT4_JOURNAL(inode)	(EXT4_SB((inode)->i_sb)->s_journal)

/* Define the number of blocks we need to account to a transaction to
 * modify one block of data.
 *
 * We may have to touch one inode, one bitmap buffer, up to three
 * indirection blocks, the group and superblock summaries, and the data
 * block to complete the transaction.
 *
 * For extents-enabled fs we may have to allocate and modify up to
 * 5 levels of tree, data block (for each of these we need bitmap + group
 * summaries), root which is stored in the inode, sb
 */
#define EXT4_SINGLEDATA_TRANS_BLOCKS(sb)				\
	(ext4_has_feature_extents(sb) ? 20U : 8U)

/* Extended attribute operations touch at most two data buffers,
 * two bitmap buffers, and two group summaries, in addition to the inode
 * and the superblock, which are already accounted for. */

#define EXT4_XATTR_TRANS_BLOCKS		6U

/* Define the minimum size for a transaction which modifies data. This
 * needs to take into account the fact that we may end up modifying two
 * quota files too (one for the group, one for the user quota). The
 * superblock only gets updated once, of course, so don't bother
 * counting that again for the quota updates.
 */

#define EXT4_DATA_TRANS_BLOCKS(sb)	(EXT4_SINGLEDATA_TRANS_BLOCKS(sb) + \
					 EXT4_XATTR_TRANS_BLOCKS - 2 + \
					 EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

/*
 * Define the number of metadata blocks we need to account to modify data.
 *
 * This includes the super block, inode block, quota blocks and xattr blocks.
 */
#define EXT4_META_TRANS_BLOCKS(sb)	(EXT4_XATTR_TRANS_BLOCKS + \
					EXT4_MAXQUOTAS_TRANS_BLOCKS(sb))

/* Define an arbitrary limit for the amount of data we will anticipate
 * writing to any given transaction. For unbounded transactions such as
 * write(2) and truncate(2) we can write more than this, but we always
 * start off at the maximum transaction size and grow the transaction
 * optimistically as we go. */

#define EXT4_MAX_TRANS_DATA		64U

/* We break up a large truncate or write transaction once the handle's
 * buffer credits get this low; we then need either to extend the
 * transaction or to start a new one. Reserve enough space here for
 * inode, bitmap, superblock, group and indirection updates for at least
 * one block, plus two quota updates. Quota allocations are not
 * needed. */

#define EXT4_RESERVE_TRANS_BLOCKS	12U

/*
 * Number of credits needed if we need to insert an entry into a
 * directory. For each new index block, we need 4 blocks (old index
 * block, new index block, bitmap block, bg summary). For normal
 * htree directories there are 2 levels; if the largedir feature is
 * enabled it's 3 levels.
 */
#define EXT4_INDEX_EXTRA_TRANS_BLOCKS	12U

#ifdef CONFIG_QUOTA
/* Amount of blocks needed for quota update - we know that the structure was
 * allocated so we need to update only data block */
#define EXT4_QUOTA_TRANS_BLOCKS(sb) ((ext4_quota_capable(sb)) ? 1 : 0)
/* Amount of blocks needed for quota insert/delete - we do some block writes
 * but inode, sb and group updates are done only once */
#define EXT4_QUOTA_INIT_BLOCKS(sb) ((ext4_quota_capable(sb)) ?\
		(DQUOT_INIT_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
		 +3+DQUOT_INIT_REWRITE) : 0)

#define EXT4_QUOTA_DEL_BLOCKS(sb) ((ext4_quota_capable(sb)) ?\
		(DQUOT_DEL_ALLOC*(EXT4_SINGLEDATA_TRANS_BLOCKS(sb)-3)\
		 +3+DQUOT_DEL_REWRITE) : 0)
#else
#define EXT4_QUOTA_TRANS_BLOCKS(sb) 0
#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
#define EXT4_QUOTA_DEL_BLOCKS(sb) 0
#endif
#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))

/*
 * Ext4 handle operation types -- for logging purposes
 */
#define EXT4_HT_MISC             0
#define EXT4_HT_INODE            1
#define EXT4_HT_WRITE_PAGE       2
#define EXT4_HT_MAP_BLOCKS       3
#define EXT4_HT_DIR              4
#define EXT4_HT_TRUNCATE         5
#define EXT4_HT_QUOTA            6
#define EXT4_HT_RESIZE           7
#define EXT4_HT_MIGRATE          8
#define EXT4_HT_MOVE_EXTENTS     9
#define EXT4_HT_XATTR           10
#define EXT4_HT_EXT_CONVERT     11
#define EXT4_HT_MAX             12

/**
 * struct ext4_journal_cb_entry - Base structure for callback information.
 *
 * This struct is a 'seed' structure for use with your own callback
 * structs. If you are using callbacks you must allocate one of these
 * or another struct of your own definition which has this struct
 * as its first element and pass it to ext4_journal_callback_add().
 */
struct ext4_journal_cb_entry {
	/* list information for other callbacks attached to the same handle */
	struct list_head jce_list;

	/* Function to call with this callback structure */
	void (*jce_func)(struct super_block *sb,
			 struct ext4_journal_cb_entry *jce, int error);

	/* user data goes here */
};

/**
 * ext4_journal_callback_add: add a function to call after transaction commit
 * @handle: active journal transaction handle to register callback on
 * @func: callback function to call after the transaction has committed;
 *        it is passed the superblock of the current filesystem, the
 *        journal callback data @jce, and the journal state at commit
 *        (0 = transaction committed properly)
 * @jce: journal callback data (internal and function private data struct)
 *
 * The registered function will be called in the context of the journal thread
 * after the transaction for which the handle was created has completed.
 *
 * No locks are held when the callback function is called, so it is safe to
 * call blocking functions from within the callback, but the callback should
 * not block or run for too long, or the filesystem will be blocked waiting for
 * the next transaction to commit. No journaling functions can be used, or
 * there is a risk of deadlock.
 *
 * There is no guaranteed calling order of multiple registered callbacks on
 * the same transaction.
 */
static inline void _ext4_journal_callback_add(handle_t *handle,
			struct ext4_journal_cb_entry *jce)
{
	/* Add the jce to transaction's private list */
	list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
}

static inline void ext4_journal_callback_add(handle_t *handle,
			void (*func)(struct super_block *sb,
				     struct ext4_journal_cb_entry *jce,
				     int rc),
			struct ext4_journal_cb_entry *jce)
{
	struct ext4_sb_info *sbi =
			EXT4_SB(handle->h_transaction->t_journal->j_private);

	/* Add the jce to transaction's private list */
	jce->jce_func = func;
	spin_lock(&sbi->s_md_lock);
	_ext4_journal_callback_add(handle, jce);
	spin_unlock(&sbi->s_md_lock);
}

/**
 * ext4_journal_callback_try_del: delete a registered callback
 * @handle: active journal transaction handle on which callback was registered
 * @jce: registered journal callback entry to unregister
 *
 * Return: true if the object was successfully removed
 */
static inline bool ext4_journal_callback_try_del(handle_t *handle,
					struct ext4_journal_cb_entry *jce)
{
	bool deleted;
	struct ext4_sb_info *sbi =
			EXT4_SB(handle->h_transaction->t_journal->j_private);

	spin_lock(&sbi->s_md_lock);
	deleted = !list_empty(&jce->jce_list);
	list_del_init(&jce->jce_list);
	spin_unlock(&sbi->s_md_lock);
	return deleted;
}

int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode,
			 struct ext4_iloc *iloc);

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh. This _must_ be cleaned up later.
 */
int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			struct ext4_iloc *iloc);

#define ext4_mark_inode_dirty(__h, __i)					\
		__ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__)
int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
				const char *func, unsigned int line);

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc);
/*
 * Wrapper functions with which ext4 calls into JBD.
*/ int __ext4_journal_get_write_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type); int __ext4_forget(const char *where, unsigned int line, handle_t *handle, int is_metadata, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t blocknr); int __ext4_journal_get_create_access(const char *where, unsigned int line, handle_t *handle, struct super_block *sb, struct buffer_head *bh, enum ext4_journal_trigger_type trigger_type); int __ext4_handle_dirty_metadata(const char *where, unsigned int line, handle_t *handle, struct inode *inode, struct buffer_head *bh); #define ext4_journal_get_write_access(handle, sb, bh, trigger_type) \ __ext4_journal_get_write_access(__func__, __LINE__, (handle), (sb), \ (bh), (trigger_type)) #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \ __ext4_forget(__func__, __LINE__, (handle), (is_metadata), (inode), \ (bh), (block_nr)) #define ext4_journal_get_create_access(handle, sb, bh, trigger_type) \ __ext4_journal_get_create_access(__func__, __LINE__, (handle), (sb), \ (bh), (trigger_type)) #define ext4_handle_dirty_metadata(handle, inode, bh) \ __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \ (bh)) handle_t *__ext4_journal_start_sb(struct inode *inode, struct super_block *sb, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds); int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096) /* Note: Do not use this for NULL handles. This is only to determine if * a properly allocated handle is using a journal or not. */ static inline int ext4_handle_valid(handle_t *handle) { if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT) return 0; return 1; } static inline void ext4_handle_sync(handle_t *handle) { if (ext4_handle_valid(handle)) handle->h_sync = 1; } static inline int ext4_handle_is_aborted(handle_t *handle) { if (ext4_handle_valid(handle)) return is_handle_aborted(handle); return 0; } static inline int ext4_free_metadata_revoke_credits(struct super_block *sb, int blocks) { /* Freeing each metadata block can result in freeing one cluster */ return blocks * EXT4_SB(sb)->s_cluster_ratio; } static inline int ext4_trans_default_revoke_credits(struct super_block *sb) { return ext4_free_metadata_revoke_credits(sb, 8); } #define ext4_journal_start_sb(sb, type, nblocks) \ __ext4_journal_start_sb(NULL, (sb), __LINE__, (type), (nblocks), 0,\ ext4_trans_default_revoke_credits(sb)) #define ext4_journal_start(inode, type, nblocks) \ __ext4_journal_start((inode), __LINE__, (type), (nblocks), 0, \ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_reserve(inode, type, blocks, rsv_blocks)\ __ext4_journal_start((inode), __LINE__, (type), (blocks), (rsv_blocks),\ ext4_trans_default_revoke_credits((inode)->i_sb)) #define ext4_journal_start_with_revoke(inode, type, blocks, revoke_creds) \ __ext4_journal_start((inode), __LINE__, (type), (blocks), 0, \ (revoke_creds)) static inline handle_t *__ext4_journal_start(struct inode *inode, unsigned int line, int type, int blocks, int rsv_blocks, int revoke_creds) { return __ext4_journal_start_sb(inode, inode->i_sb, line, type, blocks, rsv_blocks, revoke_creds); } #define ext4_journal_stop(handle) \ __ext4_journal_stop(__func__, __LINE__, (handle)) #define ext4_journal_start_reserved(handle, type) \ __ext4_journal_start_reserved((handle), __LINE__, (type)) handle_t 
*__ext4_journal_start_reserved(handle_t *handle, unsigned int line, int type); static inline handle_t *ext4_journal_current_handle(void) { return journal_current_handle(); } static inline int ext4_journal_extend(handle_t *handle, int nblocks, int revoke) { if (ext4_handle_valid(handle)) return jbd2_journal_extend(handle, nblocks, revoke); return 0; } static inline int ext4_journal_restart(handle_t *handle, int nblocks, int revoke) { if (ext4_handle_valid(handle)) return jbd2__journal_restart(handle, nblocks, revoke, GFP_NOFS); return 0; } int __ext4_journal_ensure_credits(handle_t *handle, int check_cred, int extend_cred, int revoke_cred); /* * Ensure @handle has at least @check_creds credits available. If not, * transaction will be extended or restarted to contain at least @extend_cred * credits. Before restarting transaction @fn is executed to allow for cleanup * before the transaction is restarted. * * The return value is < 0 in case of error, 0 in case the handle has enough * credits or transaction extension succeeded, 1 in case transaction had to be * restarted. */ #define ext4_journal_ensure_credits_fn(handle, check_cred, extend_cred, \ revoke_cred, fn) \ ({ \ __label__ __ensure_end; \ int err = __ext4_journal_ensure_credits((handle), (check_cred), \ (extend_cred), (revoke_cred)); \ \ if (err <= 0) \ goto __ensure_end; \ err = (fn); \ if (err < 0) \ goto __ensure_end; \ err = ext4_journal_restart((handle), (extend_cred), (revoke_cred)); \ if (err == 0) \ err = 1; \ __ensure_end: \ err; \ }) /* * Ensure given handle has at least requested amount of credits available, * possibly restarting transaction if needed. We also make sure the transaction * has space for at least ext4_trans_default_revoke_credits(sb) revoke records * as freeing one or two blocks is very common pattern and requesting this is * very cheap. 
*/ static inline int ext4_journal_ensure_credits(handle_t *handle, int credits, int revoke_creds) { return ext4_journal_ensure_credits_fn(handle, credits, credits, revoke_creds, 0); } static inline int ext4_journal_blocks_per_page(struct inode *inode) { if (EXT4_JOURNAL(inode) != NULL) return jbd2_journal_blocks_per_page(inode); return 0; } static inline int ext4_journal_force_commit(journal_t *journal) { if (journal) return jbd2_journal_force_commit(journal); return 0; } static inline int ext4_jbd2_inode_add_write(handle_t *handle, struct inode *inode, loff_t start_byte, loff_t length) { if (ext4_handle_valid(handle)) return jbd2_journal_inode_ranged_write(handle, EXT4_I(inode)->jinode, start_byte, length); return 0; } static inline int ext4_jbd2_inode_add_wait(handle_t *handle, struct inode *inode, loff_t start_byte, loff_t length) { if (ext4_handle_valid(handle)) return jbd2_journal_inode_ranged_wait(handle, EXT4_I(inode)->jinode, start_byte, length); return 0; } static inline void ext4_update_inode_fsync_trans(handle_t *handle, struct inode *inode, int datasync) { struct ext4_inode_info *ei = EXT4_I(inode); if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) { ei->i_sync_tid = handle->h_transaction->t_tid; if (datasync) ei->i_datasync_tid = handle->h_transaction->t_tid; } } /* super.c */ int ext4_force_commit(struct super_block *sb); /* * Ext4 inode journal modes */ #define EXT4_INODE_JOURNAL_DATA_MODE 0x01 /* journal data mode */ #define EXT4_INODE_ORDERED_DATA_MODE 0x02 /* ordered data mode */ #define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */ int ext4_inode_journal_mode(struct inode *inode); static inline int ext4_should_journal_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE; } static inline int ext4_should_order_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE; } static inline int ext4_should_writeback_data(struct inode *inode) { return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE; } static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks) { if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) return 0; if (!ext4_should_journal_data(inode)) return 0; /* * Data blocks in one extent are contiguous, just account for partial * clusters at extent boundaries */ return blocks + 2*(EXT4_SB(inode->i_sb)->s_cluster_ratio - 1); } /* * This function controls whether or not we should try to go down the * dioread_nolock code paths, which makes it safe to avoid taking * i_rwsem for direct I/O reads. This only works for extent-based * files, and it doesn't work if data journaling is enabled, since the * dioread_nolock code uses b_private to pass information back to the * I/O completion handler, and this conflicts with the jbd's use of * b_private. */ static inline int ext4_should_dioread_nolock(struct inode *inode) { if (!test_opt(inode->i_sb, DIOREAD_NOLOCK)) return 0; if (!S_ISREG(inode->i_mode)) return 0; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return 0; if (ext4_should_journal_data(inode)) return 0; /* temporary fix to prevent generic/422 test failures */ if (!test_opt(inode->i_sb, DELALLOC)) return 0; return 1; } #endif /* _EXT4_JBD2_H */
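
Finally, a minimal sketch (not part of the header) of the canonical start/modify/stop pattern built from the wrappers in this header; the one-credit estimate is an assumption suited to dirtying a single inode block.

/* Sketch: journal a bare inode change using the wrappers above. */
static int example_touch_inode(struct inode *inode)
{
	handle_t *handle;
	int err, err2;

	/* One credit: only the inode's block is dirtied. */
	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);

	/* Always stop the handle; report the first error. */
	err2 = ext4_journal_stop(handle);
	return err ? err : err2;
}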
/*
 * Copyright (C) 2014 Red Hat
 * Copyright (C) 2014 Intel Corp.
 * Copyright (C) 2018 Intel Corp.
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
* * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rob Clark <robdclark@gmail.com> * Daniel Vetter <daniel.vetter@ffwll.ch> */ #include <drm/drm_atomic_uapi.h> #include <drm/drm_atomic.h> #include <drm/drm_framebuffer.h> #include <drm/drm_print.h> #include <drm/drm_drv.h> #include <drm/drm_writeback.h> #include <drm/drm_vblank.h> #include <linux/dma-fence.h> #include <linux/uaccess.h> #include <linux/sync_file.h> #include <linux/file.h> #include "drm_crtc_internal.h" /** * DOC: overview * * This file contains the marshalling and demarshalling glue for the atomic UAPI * in all its forms: The monster ATOMIC IOCTL itself, code for GET_PROPERTY and * SET_PROPERTY IOCTLs. Plus interface functions for compatibility helpers and * drivers which have special needs to construct their own atomic updates, e.g. * for load detect or similar. */ /** * drm_atomic_set_mode_for_crtc - set mode for CRTC * @state: the CRTC whose incoming state to update * @mode: kernel-internal mode to use for the CRTC, or NULL to disable * * Set a mode (originating from the kernel) on the desired CRTC state and update * the enable property. * * RETURNS: * Zero on success, error code on failure. Cannot return -EDEADLK. */ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state, const struct drm_display_mode *mode) { struct drm_crtc *crtc = state->crtc; struct drm_mode_modeinfo umode; /* Early return for no change. */ if (mode && memcmp(&state->mode, mode, sizeof(*mode)) == 0) return 0; drm_property_blob_put(state->mode_blob); state->mode_blob = NULL; if (mode) { struct drm_property_blob *blob; drm_mode_convert_to_umode(&umode, mode); blob = drm_property_create_blob(crtc->dev, sizeof(umode), &umode); if (IS_ERR(blob)) return PTR_ERR(blob); drm_mode_copy(&state->mode, mode); state->mode_blob = blob; state->enable = true; drm_dbg_atomic(crtc->dev, "Set [MODE:%s] for [CRTC:%d:%s] state %p\n", mode->name, crtc->base.id, crtc->name, state); } else { memset(&state->mode, 0, sizeof(state->mode)); state->enable = false; drm_dbg_atomic(crtc->dev, "Set [NOMODE] for [CRTC:%d:%s] state %p\n", crtc->base.id, crtc->name, state); } return 0; } EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc); /** * drm_atomic_set_mode_prop_for_crtc - set mode for CRTC * @state: the CRTC whose incoming state to update * @blob: pointer to blob property to use for mode * * Set a mode (originating from a blob property) on the desired CRTC state. 
* This function will take a reference on the blob property for the CRTC state, * and release the reference held on the state's existing mode property, if any * was set. * * RETURNS: * Zero on success, error code on failure. Cannot return -EDEADLK. */ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state, struct drm_property_blob *blob) { struct drm_crtc *crtc = state->crtc; if (blob == state->mode_blob) return 0; drm_property_blob_put(state->mode_blob); state->mode_blob = NULL; memset(&state->mode, 0, sizeof(state->mode)); if (blob) { int ret; if (blob->length != sizeof(struct drm_mode_modeinfo)) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] bad mode blob length: %zu\n", crtc->base.id, crtc->name, blob->length); return -EINVAL; } ret = drm_mode_convert_umode(crtc->dev, &state->mode, blob->data); if (ret) { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] invalid mode (%s, %pe): " DRM_MODE_FMT "\n", crtc->base.id, crtc->name, drm_get_mode_status_name(state->mode.status), ERR_PTR(ret), DRM_MODE_ARG(&state->mode)); return -EINVAL; } state->mode_blob = drm_property_blob_get(blob); state->enable = true; drm_dbg_atomic(crtc->dev, "Set [MODE:%s] for [CRTC:%d:%s] state %p\n", state->mode.name, crtc->base.id, crtc->name, state); } else { state->enable = false; drm_dbg_atomic(crtc->dev, "Set [NOMODE] for [CRTC:%d:%s] state %p\n", crtc->base.id, crtc->name, state); } return 0; } EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc); /** * drm_atomic_set_crtc_for_plane - set CRTC for plane * @plane_state: the plane whose incoming state to update * @crtc: CRTC to use for the plane * * Changing the assigned CRTC for a plane requires us to grab the lock and state * for the new CRTC, as needed. This function takes care of all these details * besides updating the pointer in the state object itself. * * Returns: * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK * then the w/w mutex code has detected a deadlock and the entire atomic * sequence must be restarted. All other errors are fatal. */ int drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, struct drm_crtc *crtc) { struct drm_plane *plane = plane_state->plane; struct drm_crtc_state *crtc_state; /* Nothing to do for same crtc*/ if (plane_state->crtc == crtc) return 0; if (plane_state->crtc) { crtc_state = drm_atomic_get_crtc_state(plane_state->state, plane_state->crtc); if (WARN_ON(IS_ERR(crtc_state))) return PTR_ERR(crtc_state); crtc_state->plane_mask &= ~drm_plane_mask(plane); } plane_state->crtc = crtc; if (crtc) { crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); crtc_state->plane_mask |= drm_plane_mask(plane); } if (crtc) drm_dbg_atomic(plane->dev, "Link [PLANE:%d:%s] state %p to [CRTC:%d:%s]\n", plane->base.id, plane->name, plane_state, crtc->base.id, crtc->name); else drm_dbg_atomic(plane->dev, "Link [PLANE:%d:%s] state %p to [NOCRTC]\n", plane->base.id, plane->name, plane_state); return 0; } EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane); /** * drm_atomic_set_fb_for_plane - set framebuffer for plane * @plane_state: atomic state object for the plane * @fb: fb to use for the plane * * Changing the assigned framebuffer for a plane requires us to grab a reference * to the new fb and drop the reference to the old fb, if there is one. This * function takes care of all these details besides updating the pointer in the * state object itself. 
*/ void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state, struct drm_framebuffer *fb) { struct drm_plane *plane = plane_state->plane; if (fb) drm_dbg_atomic(plane->dev, "Set [FB:%d] for [PLANE:%d:%s] state %p\n", fb->base.id, plane->base.id, plane->name, plane_state); else drm_dbg_atomic(plane->dev, "Set [NOFB] for [PLANE:%d:%s] state %p\n", plane->base.id, plane->name, plane_state); drm_framebuffer_assign(&plane_state->fb, fb); } EXPORT_SYMBOL(drm_atomic_set_fb_for_plane); /** * drm_atomic_set_crtc_for_connector - set CRTC for connector * @conn_state: atomic state object for the connector * @crtc: CRTC to use for the connector * * Changing the assigned CRTC for a connector requires us to grab the lock and * state for the new CRTC, as needed. This function takes care of all these * details besides updating the pointer in the state object itself. * * Returns: * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK * then the w/w mutex code has detected a deadlock and the entire atomic * sequence must be restarted. All other errors are fatal. */ int drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state, struct drm_crtc *crtc) { struct drm_connector *connector = conn_state->connector; struct drm_crtc_state *crtc_state; if (conn_state->crtc == crtc) return 0; if (conn_state->crtc) { crtc_state = drm_atomic_get_new_crtc_state(conn_state->state, conn_state->crtc); crtc_state->connector_mask &= ~drm_connector_mask(conn_state->connector); drm_connector_put(conn_state->connector); conn_state->crtc = NULL; } if (crtc) { crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); crtc_state->connector_mask |= drm_connector_mask(conn_state->connector); drm_connector_get(conn_state->connector); conn_state->crtc = crtc; drm_dbg_atomic(connector->dev, "Link [CONNECTOR:%d:%s] state %p to [CRTC:%d:%s]\n", connector->base.id, connector->name, conn_state, crtc->base.id, crtc->name); } else { drm_dbg_atomic(connector->dev, "Link [CONNECTOR:%d:%s] state %p to [NOCRTC]\n", connector->base.id, connector->name, conn_state); } return 0; } EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector); static void set_out_fence_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc, s32 __user *fence_ptr) { state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr; } static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc) { s32 __user *fence_ptr; fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr; state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL; return fence_ptr; } static int set_out_fence_for_connector(struct drm_atomic_state *state, struct drm_connector *connector, s32 __user *fence_ptr) { unsigned int index = drm_connector_index(connector); if (!fence_ptr) return 0; if (put_user(-1, fence_ptr)) return -EFAULT; state->connectors[index].out_fence_ptr = fence_ptr; return 0; } static s32 __user *get_out_fence_for_connector(struct drm_atomic_state *state, struct drm_connector *connector) { unsigned int index = drm_connector_index(connector); s32 __user *fence_ptr; fence_ptr = state->connectors[index].out_fence_ptr; state->connectors[index].out_fence_ptr = NULL; return fence_ptr; } static int drm_atomic_crtc_set_property(struct drm_crtc *crtc, struct drm_crtc_state *state, struct drm_property *property, uint64_t val) { struct drm_device *dev = crtc->dev; struct drm_mode_config *config = &dev->mode_config; bool replaced = false; int ret; if (property 
== config->prop_active) state->active = val; else if (property == config->prop_mode_id) { struct drm_property_blob *mode = drm_property_lookup_blob(dev, val); ret = drm_atomic_set_mode_prop_for_crtc(state, mode); drm_property_blob_put(mode); return ret; } else if (property == config->prop_vrr_enabled) { state->vrr_enabled = val; } else if (property == config->degamma_lut_property) { ret = drm_property_replace_blob_from_id(dev, &state->degamma_lut, val, -1, sizeof(struct drm_color_lut), &replaced); state->color_mgmt_changed |= replaced; return ret; } else if (property == config->ctm_property) { ret = drm_property_replace_blob_from_id(dev, &state->ctm, val, sizeof(struct drm_color_ctm), -1, &replaced); state->color_mgmt_changed |= replaced; return ret; } else if (property == config->gamma_lut_property) { ret = drm_property_replace_blob_from_id(dev, &state->gamma_lut, val, -1, sizeof(struct drm_color_lut), &replaced); state->color_mgmt_changed |= replaced; return ret; } else if (property == config->prop_out_fence_ptr) { s32 __user *fence_ptr = u64_to_user_ptr(val); if (!fence_ptr) return 0; if (put_user(-1, fence_ptr)) return -EFAULT; set_out_fence_for_crtc(state->state, crtc, fence_ptr); } else if (property == crtc->scaling_filter_property) { state->scaling_filter = val; } else if (crtc->funcs->atomic_set_property) { return crtc->funcs->atomic_set_property(crtc, state, property, val); } else { drm_dbg_atomic(crtc->dev, "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n", crtc->base.id, crtc->name, property->base.id, property->name); return -EINVAL; } return 0; } static int drm_atomic_crtc_get_property(struct drm_crtc *crtc, const struct drm_crtc_state *state, struct drm_property *property, uint64_t *val) { struct drm_device *dev = crtc->dev; struct drm_mode_config *config = &dev->mode_config; if (property == config->prop_active) *val = drm_atomic_crtc_effectively_active(state); else if (property == config->prop_mode_id) *val = (state->mode_blob) ? state->mode_blob->base.id : 0; else if (property == config->prop_vrr_enabled) *val = state->vrr_enabled; else if (property == config->degamma_lut_property) *val = (state->degamma_lut) ? state->degamma_lut->base.id : 0; else if (property == config->ctm_property) *val = (state->ctm) ? state->ctm->base.id : 0; else if (property == config->gamma_lut_property) *val = (state->gamma_lut) ? 
state->gamma_lut->base.id : 0; else if (property == config->prop_out_fence_ptr) *val = 0; else if (property == crtc->scaling_filter_property) *val = state->scaling_filter; else if (crtc->funcs->atomic_get_property) return crtc->funcs->atomic_get_property(crtc, state, property, val); else { drm_dbg_atomic(dev, "[CRTC:%d:%s] unknown property [PROP:%d:%s]\n", crtc->base.id, crtc->name, property->base.id, property->name); return -EINVAL; } return 0; } static int drm_atomic_plane_set_property(struct drm_plane *plane, struct drm_plane_state *state, struct drm_file *file_priv, struct drm_property *property, uint64_t val) { struct drm_device *dev = plane->dev; struct drm_mode_config *config = &dev->mode_config; bool replaced = false; int ret; if (property == config->prop_fb_id) { struct drm_framebuffer *fb; fb = drm_framebuffer_lookup(dev, file_priv, val); drm_atomic_set_fb_for_plane(state, fb); if (fb) drm_framebuffer_put(fb); } else if (property == config->prop_in_fence_fd) { if (state->fence) return -EINVAL; if (U642I64(val) == -1) return 0; state->fence = sync_file_get_fence(val); if (!state->fence) return -EINVAL; } else if (property == config->prop_crtc_id) { struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val); if (val && !crtc) { drm_dbg_atomic(dev, "[PROP:%d:%s] cannot find CRTC with ID %llu\n", property->base.id, property->name, val); return -EACCES; } return drm_atomic_set_crtc_for_plane(state, crtc); } else if (property == config->prop_crtc_x) { state->crtc_x = U642I64(val); } else if (property == config->prop_crtc_y) { state->crtc_y = U642I64(val); } else if (property == config->prop_crtc_w) { state->crtc_w = val; } else if (property == config->prop_crtc_h) { state->crtc_h = val; } else if (property == config->prop_src_x) { state->src_x = val; } else if (property == config->prop_src_y) { state->src_y = val; } else if (property == config->prop_src_w) { state->src_w = val; } else if (property == config->prop_src_h) { state->src_h = val; } else if (property == plane->alpha_property) { state->alpha = val; } else if (property == plane->blend_mode_property) { state->pixel_blend_mode = val; } else if (property == plane->rotation_property) { if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n", plane->base.id, plane->name, val); return -EINVAL; } state->rotation = val; } else if (property == plane->zpos_property) { state->zpos = val; } else if (property == plane->color_encoding_property) { state->color_encoding = val; } else if (property == plane->color_range_property) { state->color_range = val; } else if (property == config->prop_fb_damage_clips) { ret = drm_property_replace_blob_from_id(dev, &state->fb_damage_clips, val, -1, sizeof(struct drm_mode_rect), &replaced); return ret; } else if (property == plane->scaling_filter_property) { state->scaling_filter = val; } else if (plane->funcs->atomic_set_property) { return plane->funcs->atomic_set_property(plane, state, property, val); } else if (property == plane->hotspot_x_property) { if (plane->type != DRM_PLANE_TYPE_CURSOR) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n", plane->base.id, plane->name, val); return -EINVAL; } state->hotspot_x = val; } else if (property == plane->hotspot_y_property) { if (plane->type != DRM_PLANE_TYPE_CURSOR) { drm_dbg_atomic(plane->dev, "[PLANE:%d:%s] is not a cursor plane: 0x%llx\n", plane->base.id, plane->name, val); return -EINVAL; } state->hotspot_y = val; } else { drm_dbg_atomic(plane->dev, 
"[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", plane->base.id, plane->name, property->base.id, property->name); return -EINVAL; } return 0; } static int drm_atomic_plane_get_property(struct drm_plane *plane, const struct drm_plane_state *state, struct drm_property *property, uint64_t *val) { struct drm_device *dev = plane->dev; struct drm_mode_config *config = &dev->mode_config; if (property == config->prop_fb_id) { *val = (state->fb) ? state->fb->base.id : 0; } else if (property == config->prop_in_fence_fd) { *val = -1; } else if (property == config->prop_crtc_id) { *val = (state->crtc) ? state->crtc->base.id : 0; } else if (property == config->prop_crtc_x) { *val = I642U64(state->crtc_x); } else if (property == config->prop_crtc_y) { *val = I642U64(state->crtc_y); } else if (property == config->prop_crtc_w) { *val = state->crtc_w; } else if (property == config->prop_crtc_h) { *val = state->crtc_h; } else if (property == config->prop_src_x) { *val = state->src_x; } else if (property == config->prop_src_y) { *val = state->src_y; } else if (property == config->prop_src_w) { *val = state->src_w; } else if (property == config->prop_src_h) { *val = state->src_h; } else if (property == plane->alpha_property) { *val = state->alpha; } else if (property == plane->blend_mode_property) { *val = state->pixel_blend_mode; } else if (property == plane->rotation_property) { *val = state->rotation; } else if (property == plane->zpos_property) { *val = state->zpos; } else if (property == plane->color_encoding_property) { *val = state->color_encoding; } else if (property == plane->color_range_property) { *val = state->color_range; } else if (property == config->prop_fb_damage_clips) { *val = (state->fb_damage_clips) ? state->fb_damage_clips->base.id : 0; } else if (property == plane->scaling_filter_property) { *val = state->scaling_filter; } else if (plane->funcs->atomic_get_property) { return plane->funcs->atomic_get_property(plane, state, property, val); } else if (property == plane->hotspot_x_property) { *val = state->hotspot_x; } else if (property == plane->hotspot_y_property) { *val = state->hotspot_y; } else { drm_dbg_atomic(dev, "[PLANE:%d:%s] unknown property [PROP:%d:%s]\n", plane->base.id, plane->name, property->base.id, property->name); return -EINVAL; } return 0; } static int drm_atomic_set_writeback_fb_for_connector( struct drm_connector_state *conn_state, struct drm_framebuffer *fb) { int ret; struct drm_connector *conn = conn_state->connector; ret = drm_writeback_set_fb(conn_state, fb); if (ret < 0) return ret; if (fb) drm_dbg_atomic(conn->dev, "Set [FB:%d] for connector state %p\n", fb->base.id, conn_state); else drm_dbg_atomic(conn->dev, "Set [NOFB] for connector state %p\n", conn_state); return 0; } static int drm_atomic_connector_set_property(struct drm_connector *connector, struct drm_connector_state *state, struct drm_file *file_priv, struct drm_property *property, uint64_t val) { struct drm_device *dev = connector->dev; struct drm_mode_config *config = &dev->mode_config; bool replaced = false; int ret; if (property == config->prop_crtc_id) { struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val); if (val && !crtc) { drm_dbg_atomic(dev, "[PROP:%d:%s] cannot find CRTC with ID %llu\n", property->base.id, property->name, val); return -EACCES; } return drm_atomic_set_crtc_for_connector(state, crtc); } else if (property == config->dpms_property) { /* setting DPMS property requires special handling, which * is done in legacy setprop path for us. Disallow (for * now?) 
atomic writes to DPMS property: */ drm_dbg_atomic(dev, "legacy [PROP:%d:%s] can only be set via legacy uAPI\n", property->base.id, property->name); return -EINVAL; } else if (property == config->tv_select_subconnector_property) { state->tv.select_subconnector = val; } else if (property == config->tv_subconnector_property) { state->tv.subconnector = val; } else if (property == config->tv_left_margin_property) { state->tv.margins.left = val; } else if (property == config->tv_right_margin_property) { state->tv.margins.right = val; } else if (property == config->tv_top_margin_property) { state->tv.margins.top = val; } else if (property == config->tv_bottom_margin_property) { state->tv.margins.bottom = val; } else if (property == config->legacy_tv_mode_property) { state->tv.legacy_mode = val; } else if (property == config->tv_mode_property) { state->tv.mode = val; } else if (property == config->tv_brightness_property) { state->tv.brightness = val; } else if (property == config->tv_contrast_property) { state->tv.contrast = val; } else if (property == config->tv_flicker_reduction_property) { state->tv.flicker_reduction = val; } else if (property == config->tv_overscan_property) { state->tv.overscan = val; } else if (property == config->tv_saturation_property) { state->tv.saturation = val; } else if (property == config->tv_hue_property) { state->tv.hue = val; } else if (property == config->link_status_property) { /* Never downgrade from GOOD to BAD on userspace's request here, * only hw issues can do that. * * For an atomic property the userspace doesn't need to be able * to understand all the properties, but needs to be able to * restore the state it wants on VT switch. So if the userspace * tries to change the link_status from GOOD to BAD, driver * silently rejects it and returns a 0. This prevents userspace * from accidentally breaking the display when it restores the * state. 
*/ if (state->link_status != DRM_LINK_STATUS_GOOD) state->link_status = val; } else if (property == config->hdr_output_metadata_property) { ret = drm_property_replace_blob_from_id(dev, &state->hdr_output_metadata, val, sizeof(struct hdr_output_metadata), -1, &replaced); return ret; } else if (property == config->aspect_ratio_property) { state->picture_aspect_ratio = val; } else if (property == config->content_type_property) { state->content_type = val; } else if (property == connector->scaling_mode_property) { state->scaling_mode = val; } else if (property == config->content_protection_property) { if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) { drm_dbg_kms(dev, "only drivers can set CP Enabled\n"); return -EINVAL; } state->content_protection = val; } else if (property == config->hdcp_content_type_property) { state->hdcp_content_type = val; } else if (property == connector->colorspace_property) { state->colorspace = val; } else if (property == config->writeback_fb_id_property) { struct drm_framebuffer *fb; int ret; fb = drm_framebuffer_lookup(dev, file_priv, val); ret = drm_atomic_set_writeback_fb_for_connector(state, fb); if (fb) drm_framebuffer_put(fb); return ret; } else if (property == config->writeback_out_fence_ptr_property) { s32 __user *fence_ptr = u64_to_user_ptr(val); return set_out_fence_for_connector(state->state, connector, fence_ptr); } else if (property == connector->max_bpc_property) { state->max_requested_bpc = val; } else if (property == connector->privacy_screen_sw_state_property) { state->privacy_screen_sw_state = val; } else if (property == connector->broadcast_rgb_property) { state->hdmi.broadcast_rgb = val; } else if (connector->funcs->atomic_set_property) { return connector->funcs->atomic_set_property(connector, state, property, val); } else { drm_dbg_atomic(connector->dev, "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n", connector->base.id, connector->name, property->base.id, property->name); return -EINVAL; } return 0; } static int drm_atomic_connector_get_property(struct drm_connector *connector, const struct drm_connector_state *state, struct drm_property *property, uint64_t *val) { struct drm_device *dev = connector->dev; struct drm_mode_config *config = &dev->mode_config; if (property == config->prop_crtc_id) { *val = (state->crtc) ? 
state->crtc->base.id : 0; } else if (property == config->dpms_property) { if (state->crtc && state->crtc->state->self_refresh_active) *val = DRM_MODE_DPMS_ON; else *val = connector->dpms; } else if (property == config->tv_select_subconnector_property) { *val = state->tv.select_subconnector; } else if (property == config->tv_subconnector_property) { *val = state->tv.subconnector; } else if (property == config->tv_left_margin_property) { *val = state->tv.margins.left; } else if (property == config->tv_right_margin_property) { *val = state->tv.margins.right; } else if (property == config->tv_top_margin_property) { *val = state->tv.margins.top; } else if (property == config->tv_bottom_margin_property) { *val = state->tv.margins.bottom; } else if (property == config->legacy_tv_mode_property) { *val = state->tv.legacy_mode; } else if (property == config->tv_mode_property) { *val = state->tv.mode; } else if (property == config->tv_brightness_property) { *val = state->tv.brightness; } else if (property == config->tv_contrast_property) { *val = state->tv.contrast; } else if (property == config->tv_flicker_reduction_property) { *val = state->tv.flicker_reduction; } else if (property == config->tv_overscan_property) { *val = state->tv.overscan; } else if (property == config->tv_saturation_property) { *val = state->tv.saturation; } else if (property == config->tv_hue_property) { *val = state->tv.hue; } else if (property == config->link_status_property) { *val = state->link_status; } else if (property == config->aspect_ratio_property) { *val = state->picture_aspect_ratio; } else if (property == config->content_type_property) { *val = state->content_type; } else if (property == connector->colorspace_property) { *val = state->colorspace; } else if (property == connector->scaling_mode_property) { *val = state->scaling_mode; } else if (property == config->hdr_output_metadata_property) { *val = state->hdr_output_metadata ? 
state->hdr_output_metadata->base.id : 0; } else if (property == config->content_protection_property) { *val = state->content_protection; } else if (property == config->hdcp_content_type_property) { *val = state->hdcp_content_type; } else if (property == config->writeback_fb_id_property) { /* Writeback framebuffer is one-shot, write and forget */ *val = 0; } else if (property == config->writeback_out_fence_ptr_property) { *val = 0; } else if (property == connector->max_bpc_property) { *val = state->max_requested_bpc; } else if (property == connector->privacy_screen_sw_state_property) { *val = state->privacy_screen_sw_state; } else if (property == connector->broadcast_rgb_property) { *val = state->hdmi.broadcast_rgb; } else if (connector->funcs->atomic_get_property) { return connector->funcs->atomic_get_property(connector, state, property, val); } else { drm_dbg_atomic(dev, "[CONNECTOR:%d:%s] unknown property [PROP:%d:%s]\n", connector->base.id, connector->name, property->base.id, property->name); return -EINVAL; } return 0; } int drm_atomic_get_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t *val) { struct drm_device *dev = property->dev; int ret; switch (obj->type) { case DRM_MODE_OBJECT_CONNECTOR: { struct drm_connector *connector = obj_to_connector(obj); WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); ret = drm_atomic_connector_get_property(connector, connector->state, property, val); break; } case DRM_MODE_OBJECT_CRTC: { struct drm_crtc *crtc = obj_to_crtc(obj); WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); ret = drm_atomic_crtc_get_property(crtc, crtc->state, property, val); break; } case DRM_MODE_OBJECT_PLANE: { struct drm_plane *plane = obj_to_plane(obj); WARN_ON(!drm_modeset_is_locked(&plane->mutex)); ret = drm_atomic_plane_get_property(plane, plane->state, property, val); break; } default: drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id); ret = -EINVAL; break; } return ret; } /* * The big monster ioctl */ static struct drm_pending_vblank_event *create_vblank_event( struct drm_crtc *crtc, uint64_t user_data) { struct drm_pending_vblank_event *e = NULL; e = kzalloc(sizeof *e, GFP_KERNEL); if (!e) return NULL; e->event.base.type = DRM_EVENT_FLIP_COMPLETE; e->event.base.length = sizeof(e->event); e->event.vbl.crtc_id = crtc->base.id; e->event.vbl.user_data = user_data; return e; } int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, struct drm_connector *connector, int mode) { struct drm_connector *tmp_connector; struct drm_connector_state *new_conn_state; struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int i, ret, old_mode = connector->dpms; bool active = false; ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex, state->acquire_ctx); if (ret) return ret; if (mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; connector->dpms = mode; crtc = connector->state->crtc; if (!crtc) goto out; ret = drm_atomic_add_affected_connectors(state, crtc); if (ret) goto out; crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); goto out; } for_each_new_connector_in_state(state, tmp_connector, new_conn_state, i) { if (new_conn_state->crtc != crtc) continue; if (tmp_connector->dpms == DRM_MODE_DPMS_ON) { active = true; break; } } crtc_state->active = active; ret = drm_atomic_commit(state); out: if (ret != 0) connector->dpms = old_mode; return ret; } static int drm_atomic_check_prop_changes(int ret, uint64_t old_val, uint64_t prop_value, struct 
drm_property *prop) { if (ret != 0 || old_val != prop_value) { drm_dbg_atomic(prop->dev, "[PROP:%d:%s] No prop can be changed during async flip\n", prop->base.id, prop->name); return -EINVAL; } return 0; } int drm_atomic_set_property(struct drm_atomic_state *state, struct drm_file *file_priv, struct drm_mode_object *obj, struct drm_property *prop, u64 prop_value, bool async_flip) { struct drm_mode_object *ref; u64 old_val; int ret; if (!drm_property_change_valid_get(prop, prop_value, &ref)) return -EINVAL; switch (obj->type) { case DRM_MODE_OBJECT_CONNECTOR: { struct drm_connector *connector = obj_to_connector(obj); struct drm_connector_state *connector_state; connector_state = drm_atomic_get_connector_state(state, connector); if (IS_ERR(connector_state)) { ret = PTR_ERR(connector_state); break; } if (async_flip) { ret = drm_atomic_connector_get_property(connector, connector_state, prop, &old_val); ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); break; } ret = drm_atomic_connector_set_property(connector, connector_state, file_priv, prop, prop_value); break; } case DRM_MODE_OBJECT_CRTC: { struct drm_crtc *crtc = obj_to_crtc(obj); struct drm_crtc_state *crtc_state; crtc_state = drm_atomic_get_crtc_state(state, crtc); if (IS_ERR(crtc_state)) { ret = PTR_ERR(crtc_state); break; } if (async_flip) { ret = drm_atomic_crtc_get_property(crtc, crtc_state, prop, &old_val); ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); break; } ret = drm_atomic_crtc_set_property(crtc, crtc_state, prop, prop_value); break; } case DRM_MODE_OBJECT_PLANE: { struct drm_plane *plane = obj_to_plane(obj); struct drm_plane_state *plane_state; struct drm_mode_config *config = &plane->dev->mode_config; plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { ret = PTR_ERR(plane_state); break; } if (async_flip && (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY || (prop != config->prop_fb_id && prop != config->prop_in_fence_fd && prop != config->prop_fb_damage_clips))) { ret = drm_atomic_plane_get_property(plane, plane_state, prop, &old_val); ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop); break; } ret = drm_atomic_plane_set_property(plane, plane_state, file_priv, prop, prop_value); break; } default: drm_dbg_atomic(prop->dev, "[OBJECT:%d] has no properties\n", obj->id); ret = -EINVAL; break; } drm_property_change_valid_put(prop, ref); return ret; } /** * DOC: explicit fencing properties * * Explicit fencing allows userspace to control the buffer synchronization * between devices. A Fence or a group of fences are transferred to/from * userspace using Sync File fds and there are two DRM properties for that. * IN_FENCE_FD on each DRM Plane to send fences to the kernel and * OUT_FENCE_PTR on each DRM CRTC to receive fences from the kernel. * * As a contrast, with implicit fencing the kernel keeps track of any * ongoing rendering, and automatically ensures that the atomic update waits * for any pending rendering to complete. This is usually tracked in &struct * dma_resv which can also contain mandatory kernel fences. Implicit syncing * is how Linux traditionally worked (e.g. DRI2/3 on X.org), whereas explicit * fencing is what Android wants. * * "IN_FENCE_FD”: * Use this property to pass a fence that DRM should wait on before * proceeding with the Atomic Commit request and show the framebuffer for * the plane on the screen. 
The fence can be either a normal fence or a * merged one, the sync_file framework will handle both cases and use a * fence_array if a merged fence is received. Passing -1 here means no * fences to wait on. * * If the Atomic Commit request has the DRM_MODE_ATOMIC_TEST_ONLY flag * it will only check if the Sync File is a valid one. * * On the driver side the fence is stored on the @fence parameter of * &struct drm_plane_state. Drivers which also support implicit fencing * should extract the implicit fence using drm_gem_plane_helper_prepare_fb(), * to make sure there's consistent behaviour between drivers in precedence * of implicit vs. explicit fencing. * * "OUT_FENCE_PTR”: * Use this property to pass a file descriptor pointer to DRM. Once the * Atomic Commit request call returns OUT_FENCE_PTR will be filled with * the file descriptor number of a Sync File. This Sync File contains the * CRTC fence that will be signaled when all framebuffers present on the * Atomic Commit * request for that given CRTC are scanned out on the * screen. * * The Atomic Commit request fails if a invalid pointer is passed. If the * Atomic Commit request fails for any other reason the out fence fd * returned will be -1. On a Atomic Commit with the * DRM_MODE_ATOMIC_TEST_ONLY flag the out fence will also be set to -1. * * Note that out-fences don't have a special interface to drivers and are * internally represented by a &struct drm_pending_vblank_event in struct * &drm_crtc_state, which is also used by the nonblocking atomic commit * helpers and for the DRM event handling for existing userspace. */ struct drm_out_fence_state { s32 __user *out_fence_ptr; struct sync_file *sync_file; int fd; }; static int setup_out_fence(struct drm_out_fence_state *fence_state, struct dma_fence *fence) { fence_state->fd = get_unused_fd_flags(O_CLOEXEC); if (fence_state->fd < 0) return fence_state->fd; if (put_user(fence_state->fd, fence_state->out_fence_ptr)) return -EFAULT; fence_state->sync_file = sync_file_create(fence); if (!fence_state->sync_file) return -ENOMEM; return 0; } static int prepare_signaling(struct drm_device *dev, struct drm_atomic_state *state, struct drm_mode_atomic *arg, struct drm_file *file_priv, struct drm_out_fence_state **fence_state, unsigned int *num_fences) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; struct drm_connector *conn; struct drm_connector_state *conn_state; int i, c = 0, ret; if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) return 0; for_each_new_crtc_in_state(state, crtc, crtc_state, i) { s32 __user *fence_ptr; fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc); if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT || fence_ptr) { struct drm_pending_vblank_event *e; e = create_vblank_event(crtc, arg->user_data); if (!e) return -ENOMEM; crtc_state->event = e; } if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) { struct drm_pending_vblank_event *e = crtc_state->event; if (!file_priv) continue; ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); if (ret) { kfree(e); crtc_state->event = NULL; return ret; } } if (fence_ptr) { struct dma_fence *fence; struct drm_out_fence_state *f; f = krealloc(*fence_state, sizeof(**fence_state) * (*num_fences + 1), GFP_KERNEL); if (!f) return -ENOMEM; memset(&f[*num_fences], 0, sizeof(*f)); f[*num_fences].out_fence_ptr = fence_ptr; *fence_state = f; fence = drm_crtc_create_fence(crtc); if (!fence) return -ENOMEM; ret = setup_out_fence(&f[(*num_fences)++], fence); if (ret) { dma_fence_put(fence); return ret; } crtc_state->event->base.fence = 
fence; } c++; } for_each_new_connector_in_state(state, conn, conn_state, i) { struct drm_writeback_connector *wb_conn; struct drm_out_fence_state *f; struct dma_fence *fence; s32 __user *fence_ptr; if (!conn_state->writeback_job) continue; fence_ptr = get_out_fence_for_connector(state, conn); if (!fence_ptr) continue; f = krealloc(*fence_state, sizeof(**fence_state) * (*num_fences + 1), GFP_KERNEL); if (!f) return -ENOMEM; memset(&f[*num_fences], 0, sizeof(*f)); f[*num_fences].out_fence_ptr = fence_ptr; *fence_state = f; wb_conn = drm_connector_to_writeback(conn); fence = drm_writeback_get_out_fence(wb_conn); if (!fence) return -ENOMEM; ret = setup_out_fence(&f[(*num_fences)++], fence); if (ret) { dma_fence_put(fence); return ret; } conn_state->writeback_job->out_fence = fence; } /* * Having this flag means user mode pends on event which will never * reach due to lack of at least one CRTC for signaling */ if (c == 0 && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) { drm_dbg_atomic(dev, "need at least one CRTC for DRM_MODE_PAGE_FLIP_EVENT"); return -EINVAL; } return 0; } static void complete_signaling(struct drm_device *dev, struct drm_atomic_state *state, struct drm_out_fence_state *fence_state, unsigned int num_fences, bool install_fds) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int i; if (install_fds) { for (i = 0; i < num_fences; i++) fd_install(fence_state[i].fd, fence_state[i].sync_file->file); kfree(fence_state); return; } for_each_new_crtc_in_state(state, crtc, crtc_state, i) { struct drm_pending_vblank_event *event = crtc_state->event; /* * Free the allocated event. drm_atomic_helper_setup_commit * can allocate an event too, so only free it if it's ours * to prevent a double free in drm_atomic_state_clear. */ if (event && (event->base.fence || event->base.file_priv)) { drm_event_cancel_free(dev, &event->base); crtc_state->event = NULL; } } if (!fence_state) return; for (i = 0; i < num_fences; i++) { if (fence_state[i].sync_file) fput(fence_state[i].sync_file->file); if (fence_state[i].fd >= 0) put_unused_fd(fence_state[i].fd); /* If this fails log error to the user */ if (fence_state[i].out_fence_ptr && put_user(-1, fence_state[i].out_fence_ptr)) drm_dbg_atomic(dev, "Couldn't clear out_fence_ptr\n"); } kfree(fence_state); } static void set_async_flip(struct drm_atomic_state *state) { struct drm_crtc *crtc; struct drm_crtc_state *crtc_state; int i; for_each_new_crtc_in_state(state, crtc, crtc_state, i) { crtc_state->async_flip = true; } } int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_atomic *arg = data; uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr); uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr); uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr); uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr); unsigned int copied_objs, copied_props; struct drm_atomic_state *state; struct drm_modeset_acquire_ctx ctx; struct drm_out_fence_state *fence_state; int ret = 0; unsigned int i, j, num_fences; bool async_flip = false; /* disallow for drivers not supporting atomic: */ if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) return -EOPNOTSUPP; /* disallow for userspace that has not enabled atomic cap (even * though this may be a bit overkill, since legacy userspace * wouldn't know how to call this ioctl) */ if (!file_priv->atomic) { drm_dbg_atomic(dev, "commit failed: atomic cap 
not enabled\n"); return -EINVAL; } if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS) { drm_dbg_atomic(dev, "commit failed: invalid flag\n"); return -EINVAL; } if (arg->reserved) { drm_dbg_atomic(dev, "commit failed: reserved field set\n"); return -EINVAL; } if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) { if (!dev->mode_config.async_page_flip) { drm_dbg_atomic(dev, "commit failed: DRM_MODE_PAGE_FLIP_ASYNC not supported\n"); return -EINVAL; } async_flip = true; } /* can't test and expect an event at the same time. */ if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) && (arg->flags & DRM_MODE_PAGE_FLIP_EVENT)) { drm_dbg_atomic(dev, "commit failed: page-flip event requested with test-only commit\n"); return -EINVAL; } state = drm_atomic_state_alloc(dev); if (!state) return -ENOMEM; drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); state->acquire_ctx = &ctx; state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET); retry: copied_objs = 0; copied_props = 0; fence_state = NULL; num_fences = 0; for (i = 0; i < arg->count_objs; i++) { uint32_t obj_id, count_props; struct drm_mode_object *obj; if (get_user(obj_id, objs_ptr + copied_objs)) { ret = -EFAULT; goto out; } obj = drm_mode_object_find(dev, file_priv, obj_id, DRM_MODE_OBJECT_ANY); if (!obj) { drm_dbg_atomic(dev, "cannot find object ID %d", obj_id); ret = -ENOENT; goto out; } if (!obj->properties) { drm_dbg_atomic(dev, "[OBJECT:%d] has no properties", obj_id); drm_mode_object_put(obj); ret = -ENOENT; goto out; } if (get_user(count_props, count_props_ptr + copied_objs)) { drm_mode_object_put(obj); ret = -EFAULT; goto out; } copied_objs++; for (j = 0; j < count_props; j++) { uint32_t prop_id; uint64_t prop_value; struct drm_property *prop; if (get_user(prop_id, props_ptr + copied_props)) { drm_mode_object_put(obj); ret = -EFAULT; goto out; } prop = drm_mode_obj_find_prop_id(obj, prop_id); if (!prop) { drm_dbg_atomic(dev, "[OBJECT:%d] cannot find property ID %d", obj_id, prop_id); drm_mode_object_put(obj); ret = -ENOENT; goto out; } if (copy_from_user(&prop_value, prop_values_ptr + copied_props, sizeof(prop_value))) { drm_mode_object_put(obj); ret = -EFAULT; goto out; } ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value, async_flip); if (ret) { drm_mode_object_put(obj); goto out; } copied_props++; } drm_mode_object_put(obj); } ret = prepare_signaling(dev, state, arg, file_priv, &fence_state, &num_fences); if (ret) goto out; if (arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) set_async_flip(state); if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { ret = drm_atomic_check_only(state); } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { ret = drm_atomic_nonblocking_commit(state); } else { ret = drm_atomic_commit(state); } out: complete_signaling(dev, state, fence_state, num_fences, !ret); if (ret == -EDEADLK) { drm_atomic_state_clear(state); ret = drm_modeset_backoff(&ctx); if (!ret) goto retry; } drm_atomic_state_put(state); drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); return ret; }
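The property arrays parsed by drm_mode_atomic_ioctl() above are normally built by userspace through libdrm's atomic helpers. The sketch below is an illustration only, not part of the kernel sources: it assumes the caller has already enabled DRM_CLIENT_CAP_ATOMIC via drmSetClientCap() and has looked up the relevant property IDs beforehand with drmModeObjectGetProperties()/drmModeGetProperty(); prop_fb_id and prop_out_fence_ptr are hypothetical parameter names for those IDs.

/*
 * Userspace sketch (illustration only, not kernel code): builds the
 * object/property/value lists consumed by drm_mode_atomic_ioctl() using
 * libdrm's atomic request helpers.  All IDs are assumed to have been
 * discovered by the caller beforehand.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int flip_with_out_fence(int fd, uint32_t crtc_id, uint32_t plane_id,
                               uint32_t prop_fb_id, uint32_t prop_out_fence_ptr,
                               uint32_t fb_id, int32_t *out_fence_fd)
{
    drmModeAtomicReqPtr req;
    int ret;

    /* Requires drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1) earlier;
     * otherwise the ioctl bails out on the file_priv->atomic check. */
    req = drmModeAtomicAlloc();
    if (!req)
        return -ENOMEM;

    /* FB_ID on the plane: the framebuffer to scan out next. */
    drmModeAtomicAddProperty(req, plane_id, prop_fb_id, fb_id);

    /* OUT_FENCE_PTR on the CRTC: the kernel stores a sync_file fd through
     * this pointer once the commit is queued (see prepare_signaling()). */
    *out_fence_fd = -1;
    drmModeAtomicAddProperty(req, crtc_id, prop_out_fence_ptr,
                             (uint64_t)(uintptr_t)out_fence_fd);

    /* Validate with TEST_ONLY first, then commit without blocking. */
    ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
    if (!ret)
        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);

    drmModeAtomicFree(req);
    return ret;
}

Doing a DRM_MODE_ATOMIC_TEST_ONLY pass first mirrors the check-only branch in the ioctl; the real commit then reuses the same request.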
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _KBD_KERN_H #define _KBD_KERN_H #include <linux/tty.h> #include <linux/interrupt.h> #include <linux/keyboard.h> extern char *func_table[MAX_NR_FUNC]; /* * kbd->xxx contains the VC-local things (flag settings etc..) * * Note: externally visible are LED_SCR, LED_NUM, LED_CAP defined in kd.h * The code in KDGETLED / KDSETLED depends on the internal and * external order being the same. * * Note: lockstate is used as index in the array key_map. */ struct kbd_struct { unsigned char lockstate; /* 8 modifiers - the names do not have any meaning at all; they can be associated to arbitrarily chosen keys */ #define VC_SHIFTLOCK KG_SHIFT /* shift lock mode */ #define VC_ALTGRLOCK KG_ALTGR /* altgr lock mode */ #define VC_CTRLLOCK KG_CTRL /* control lock mode */ #define VC_ALTLOCK KG_ALT /* alt lock mode */ #define VC_SHIFTLLOCK KG_SHIFTL /* shiftl lock mode */ #define VC_SHIFTRLOCK KG_SHIFTR /* shiftr lock mode */ #define VC_CTRLLLOCK KG_CTRLL /* ctrll lock mode */ #define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */ unsigned char slockstate; /* for `sticky' Shift, Ctrl, etc. */ unsigned char ledmode:1; #define LED_SHOW_FLAGS 0 /* traditional state */ #define LED_SHOW_IOCTL 1 /* only change leds upon ioctl */ unsigned char ledflagstate:4; /* flags, not lights */ unsigned char default_ledflagstate:4; #define VC_SCROLLOCK 0 /* scroll-lock mode */ #define VC_NUMLOCK 1 /* numeric lock mode */ #define VC_CAPSLOCK 2 /* capslock mode */ #define VC_KANALOCK 3 /* kanalock mode */ unsigned char kbdmode:3; /* one 3-bit value */ #define VC_XLATE 0 /* translate keycodes using keymap */ #define VC_MEDIUMRAW 1 /* medium raw (keycode) mode */ #define VC_RAW 2 /* raw (scancode) mode */ #define VC_UNICODE 3 /* Unicode mode */ #define VC_OFF 4 /* disabled mode */ unsigned char modeflags:5; #define VC_APPLIC 0 /* application key mode */ #define VC_CKMODE 1 /* cursor key mode */ #define VC_REPEAT 2 /* keyboard repeat */ #define VC_CRLF 3 /* 0 - enter sends CR, 1 - enter sends CRLF */ #define VC_META 4 /* 0 - meta, 1 - meta=prefix with ESC */ }; extern int kbd_init(void); extern void setledstate(struct kbd_struct *kbd, unsigned int led); extern int do_poke_blanked_console; extern void (*kbd_ledfunc)(unsigned int led); extern int set_console(int nr); extern void schedule_console_callback(void); static inline int vc_kbd_mode(struct kbd_struct * kbd, int flag) { return ((kbd->modeflags >> flag) & 1); } static inline int vc_kbd_led(struct kbd_struct * kbd, int flag) { return ((kbd->ledflagstate >> flag) & 1); } static inline void set_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags |= 1 << flag; } static inline void set_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate |= 1 << flag; } static inline void clr_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags &= ~(1 << flag); } static inline void clr_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate &= ~(1 << flag); } static inline void chg_vc_kbd_lock(struct kbd_struct * kbd, int flag) { kbd->lockstate ^= 1 << flag; } static inline void
chg_vc_kbd_slock(struct kbd_struct * kbd, int flag) { kbd->slockstate ^= 1 << flag; } static inline void chg_vc_kbd_mode(struct kbd_struct * kbd, int flag) { kbd->modeflags ^= 1 << flag; } static inline void chg_vc_kbd_led(struct kbd_struct * kbd, int flag) { kbd->ledflagstate ^= 1 << flag; } #define U(x) ((x) ^ 0xf000) #define BRL_UC_ROW 0x2800 /* keyboard.c */ struct console; void vt_set_leds_compute_shiftstate(void); /* defkeymap.c */ extern unsigned int keymap_count; #endif
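The kbd_struct accessors above are simple bit-vector operations indexed by the VC_* flag numbers. The following is a standalone sketch of the same pattern; the struct and helper names are local to the example and re-declared so it builds outside the kernel.

/*
 * Standalone sketch (not kernel code): the bit-vector pattern used by the
 * kbd_struct helpers, re-declared locally so it compiles in userspace.
 */
#include <stdio.h>

#define VC_SCROLLOCK 0 /* scroll-lock flag bit   */
#define VC_NUMLOCK   1 /* numeric-lock flag bit  */
#define VC_CAPSLOCK  2 /* capslock flag bit      */

struct kbd_flags {
    unsigned char ledflagstate;
};

static inline void set_led(struct kbd_flags *kbd, int flag) { kbd->ledflagstate |= 1 << flag; }
static inline void clr_led(struct kbd_flags *kbd, int flag) { kbd->ledflagstate &= ~(1 << flag); }
static inline void chg_led(struct kbd_flags *kbd, int flag) { kbd->ledflagstate ^= 1 << flag; }
static inline int  get_led(const struct kbd_flags *kbd, int flag) { return (kbd->ledflagstate >> flag) & 1; }

int main(void)
{
    struct kbd_flags kbd = { 0 };

    set_led(&kbd, VC_NUMLOCK);   /* turn the NumLock flag on */
    chg_led(&kbd, VC_CAPSLOCK);  /* toggle CapsLock: 0 -> 1  */
    clr_led(&kbd, VC_SCROLLOCK); /* already clear, stays 0   */

    printf("num=%d caps=%d scroll=%d state=0x%x\n",
           get_led(&kbd, VC_NUMLOCK), get_led(&kbd, VC_CAPSLOCK),
           get_led(&kbd, VC_SCROLLOCK), kbd.ledflagstate);
    return 0; /* prints: num=1 caps=1 scroll=0 state=0x6 */
}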
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <net/netfilter/nf_queue.h> #include <net/ip6_checksum.h> #ifdef CONFIG_INET __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u8 protocol) { const struct iphdr *iph = ip_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) break; if ((protocol != IPPROTO_TCP && protocol != IPPROTO_UDP && !csum_fold(skb->csum)) || !csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - dataoff, protocol, skb->csum)) { skb->ip_summed = CHECKSUM_UNNECESSARY; break; } fallthrough; case CHECKSUM_NONE: if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) skb->csum = 0; else skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len - dataoff, protocol, 0); csum = __skb_checksum_complete(skb); } return csum; } EXPORT_SYMBOL(nf_ip_checksum); #endif static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u8 protocol) { const struct iphdr *iph = ip_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (len == skb->len - dataoff) return nf_ip_checksum(skb, hook, dataoff, protocol); fallthrough; case CHECKSUM_NONE: skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, skb->len - dataoff, 0); skb->ip_summed = CHECKSUM_NONE; return __skb_checksum_complete_head(skb, dataoff + len); } return csum; } __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u8 protocol) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN) break; if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb->len - dataoff, protocol, csum_sub(skb->csum, skb_checksum(skb, 0, dataoff, 0)))) { skb->ip_summed = CHECKSUM_UNNECESSARY; break; } fallthrough; case CHECKSUM_NONE: skb->csum = ~csum_unfold( csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb->len - dataoff, protocol, csum_sub(0, skb_checksum(skb, 0, dataoff, 0)))); csum = __skb_checksum_complete(skb); } return csum; } EXPORT_SYMBOL(nf_ip6_checksum); static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u8 protocol) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); __wsum hsum; __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (len == skb->len - dataoff) return nf_ip6_checksum(skb, hook, dataoff, protocol); fallthrough; case CHECKSUM_NONE: hsum =
skb_checksum(skb, 0, dataoff, 0); skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb->len - dataoff, protocol, csum_sub(0, hsum))); skb->ip_summed = CHECKSUM_NONE; return __skb_checksum_complete_head(skb, dataoff + len); } return csum; }; __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u8 protocol, unsigned short family) { __sum16 csum = 0; switch (family) { case AF_INET: csum = nf_ip_checksum(skb, hook, dataoff, protocol); break; case AF_INET6: csum = nf_ip6_checksum(skb, hook, dataoff, protocol); break; } return csum; } EXPORT_SYMBOL_GPL(nf_checksum); __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, unsigned int len, u8 protocol, unsigned short family) { __sum16 csum = 0; switch (family) { case AF_INET: csum = nf_ip_checksum_partial(skb, hook, dataoff, len, protocol); break; case AF_INET6: csum = nf_ip6_checksum_partial(skb, hook, dataoff, len, protocol); break; } return csum; } EXPORT_SYMBOL_GPL(nf_checksum_partial); int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict, unsigned short family) { const struct nf_ipv6_ops *v6ops __maybe_unused; int ret = 0; switch (family) { case AF_INET: ret = nf_ip_route(net, dst, fl, strict); break; case AF_INET6: ret = nf_ip6_route(net, dst, fl, strict); break; } return ret; } EXPORT_SYMBOL_GPL(nf_route); /* Only get and check the lengths, not do any hop-by-hop stuff. */ int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen) { int len, off = sizeof(struct ipv6hdr); unsigned char *nh; if (!pskb_may_pull(skb, off + 8)) return -ENOMEM; nh = (unsigned char *)(ipv6_hdr(skb) + 1); len = (nh[1] + 1) << 3; if (!pskb_may_pull(skb, off + len)) return -ENOMEM; nh = skb_network_header(skb); off += 2; len -= 2; while (len > 0) { int optlen; if (nh[off] == IPV6_TLV_PAD1) { off++; len--; continue; } if (len < 2) return -EBADMSG; optlen = nh[off + 1] + 2; if (optlen > len) return -EBADMSG; if (nh[off] == IPV6_TLV_JUMBO) { u32 pkt_len; if (nh[off + 1] != 4 || (off & 3) != 2) return -EBADMSG; pkt_len = ntohl(*(__be32 *)(nh + off + 2)); if (pkt_len <= IPV6_MAXPLEN || ipv6_hdr(skb)->payload_len) return -EBADMSG; if (pkt_len > skb->len - sizeof(struct ipv6hdr)) return -EBADMSG; *plen = pkt_len; } off += optlen; len -= optlen; } return len ? -EBADMSG : 0; } EXPORT_SYMBOL_GPL(nf_ip6_check_hbh_len);
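nf_ip6_check_hbh_len() above is a plain TLV walk over the hop-by-hop extension header: PAD1 options occupy a single byte, every other option carries a length byte, and a Jumbo Payload option must hold exactly 4 bytes of data at a 4-byte-aligned offset. The sketch below mirrors that walk over an ordinary byte buffer as an illustration only; the kernel function additionally checks the jumbo length against IPV6_MAXPLEN, the payload_len field, and skb->len, which this sketch omits.

/*
 * Standalone sketch (not kernel code): walks the option TLVs of an IPv6
 * hop-by-hop extension header held in a plain byte buffer, following the
 * same PAD1 / Jumbo Payload layout rules as nf_ip6_check_hbh_len().
 * Returns 0 when the layout is well formed, -1 otherwise.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLV_PAD1  0x00 /* single-byte padding option       */
#define TLV_JUMBO 0xC2 /* Jumbo Payload option (RFC 2675)  */

static int walk_hbh(const uint8_t *hbh, size_t avail, uint32_t *jumbo_len)
{
    size_t len, off = 2; /* skip next-header and hdr-ext-len bytes */

    if (avail < 8)
        return -1;
    len = ((size_t)hbh[1] + 1) * 8; /* total header length in bytes */
    if (len > avail)
        return -1;

    while (off < len) {
        if (hbh[off] == TLV_PAD1) { /* PAD1 has no length byte */
            off++;
            continue;
        }
        if (off + 2 > len) /* need at least type + length */
            return -1;
        size_t optlen = (size_t)hbh[off + 1] + 2;
        if (off + optlen > len)
            return -1;
        if (hbh[off] == TLV_JUMBO) {
            uint32_t pkt_len;

            /* 4 bytes of data, and the data must be 4-byte aligned. */
            if (hbh[off + 1] != 4 || (off & 3) != 2)
                return -1;
            memcpy(&pkt_len, &hbh[off + 2], sizeof(pkt_len));
            *jumbo_len = ntohl(pkt_len);
        }
        off += optlen;
    }
    return 0;
}

int main(void)
{
    /* next header = 59 (none), hdr ext len = 0 (8 bytes total),
     * followed by a PadN option covering the remaining 6 bytes. */
    const uint8_t hbh[8] = { 59, 0, 0x01, 0x04, 0, 0, 0, 0 };
    uint32_t jumbo = 0;

    printf("well-formed: %s\n",
           walk_hbh(hbh, sizeof(hbh), &jumbo) == 0 ? "yes" : "no");
    return 0;
}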
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net> * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/if_arp.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables_offload.h> #include <net/netfilter/nf_tables.h> struct nft_cmp_expr { struct nft_data data; u8 sreg; u8 len; enum nft_cmp_ops op:8; }; void nft_cmp_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { const struct nft_cmp_expr *priv = nft_expr_priv(expr); int d; d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len); switch (priv->op) { case NFT_CMP_EQ: if (d != 0) goto mismatch; break; case NFT_CMP_NEQ: if (d == 0) goto mismatch; break; case NFT_CMP_LT: if (d == 0) goto mismatch; fallthrough; case NFT_CMP_LTE: if (d > 0) goto mismatch; break; case NFT_CMP_GT: if (d == 0) goto mismatch; fallthrough; case NFT_CMP_GTE: if (d < 0) goto mismatch; break; } return; mismatch: regs->verdict.code = NFT_BREAK; } static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = { [NFTA_CMP_SREG] = { .type = NLA_U32 }, [NFTA_CMP_OP] = { .type = NLA_U32 }, [NFTA_CMP_DATA] = { .type = NLA_NESTED }, }; static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_cmp_expr *priv = nft_expr_priv(expr); struct nft_data_desc desc = { .type = NFT_DATA_VALUE, .size = sizeof(priv->data), }; int err; err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]); if (err < 0) return err; err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len); if (err < 0) return err; priv->op =
ntohl(nla_get_be32(tb[NFTA_CMP_OP])); priv->len = desc.len; return 0; } static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_cmp_expr *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op))) goto nla_put_failure; if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data, NFT_DATA_VALUE, priv->len) < 0) goto nla_put_failure; return 0; nla_put_failure: return -1; } union nft_cmp_offload_data { u16 val16; u32 val32; u64 val64; }; static void nft_payload_n2h(union nft_cmp_offload_data *data, const u8 *val, u32 len) { switch (len) { case 2: data->val16 = ntohs(*((__be16 *)val)); break; case 4: data->val32 = ntohl(*((__be32 *)val)); break; case 8: data->val64 = be64_to_cpu(*((__be64 *)val)); break; default: WARN_ON_ONCE(1); break; } } static int __nft_cmp_offload(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_cmp_expr *priv) { struct nft_offload_reg *reg = &ctx->regs[priv->sreg]; union nft_cmp_offload_data _data, _datamask; u8 *mask = (u8 *)&flow->match.mask; u8 *key = (u8 *)&flow->match.key; u8 *data, *datamask; if (priv->op != NFT_CMP_EQ || priv->len > reg->len) return -EOPNOTSUPP; if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) { nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len); nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len); data = (u8 *)&_data; datamask = (u8 *)&_datamask; } else { data = (u8 *)&priv->data; datamask = (u8 *)&reg->mask; } memcpy(key + reg->offset, data, reg->len); memcpy(mask + reg->offset, datamask, reg->len); flow->match.dissector.used_keys |= BIT_ULL(reg->key); flow->match.dissector.offset[reg->key] = reg->base_offset; if (reg->key == FLOW_DISSECTOR_KEY_META && reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) && nft_reg_load16(priv->data.data) != ARPHRD_ETHER) return -EOPNOTSUPP; nft_offload_update_dependency(ctx, &priv->data, reg->len); return 0; } static int nft_cmp_offload(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr) { const struct nft_cmp_expr *priv = nft_expr_priv(expr); return __nft_cmp_offload(ctx, flow, priv); } static const struct nft_expr_ops nft_cmp_ops = { .type = &nft_cmp_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)), .eval = nft_cmp_eval, .init = nft_cmp_init, .dump = nft_cmp_dump, .reduce = NFT_REDUCE_READONLY, .offload = nft_cmp_offload, }; /* Calculate the mask for the nft_cmp_fast expression. On big endian the * mask needs to include the *upper* bytes when interpreting that data as * something smaller than the full u32, therefore a cpu_to_le32 is done. 
*/ static u32 nft_cmp_fast_mask(unsigned int len) { __le32 mask = cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr, data) * BITS_PER_BYTE - len)); return (__force u32)mask; } static int nft_cmp_fast_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_cmp_fast_expr *priv = nft_expr_priv(expr); struct nft_data data; struct nft_data_desc desc = { .type = NFT_DATA_VALUE, .size = sizeof(data), }; int err; err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]); if (err < 0) return err; err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len); if (err < 0) return err; desc.len *= BITS_PER_BYTE; priv->mask = nft_cmp_fast_mask(desc.len); priv->data = data.data[0] & priv->mask; priv->len = desc.len; priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ; return 0; } static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr) { const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr); struct nft_cmp_expr cmp = { .data = { .data = { [0] = priv->data, }, }, .sreg = priv->sreg, .len = priv->len / BITS_PER_BYTE, .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ, }; return __nft_cmp_offload(ctx, flow, &cmp); } static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr); enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ; struct nft_data data; if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op))) goto nla_put_failure; data.data[0] = priv->data; if (nft_data_dump(skb, NFTA_CMP_DATA, &data, NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0) goto nla_put_failure; return 0; nla_put_failure: return -1; } const struct nft_expr_ops nft_cmp_fast_ops = { .type = &nft_cmp_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)), .eval = NULL, /* inlined */ .init = nft_cmp_fast_init, .dump = nft_cmp_fast_dump, .reduce = NFT_REDUCE_READONLY, .offload = nft_cmp_fast_offload, }; static u32 nft_cmp_mask(u32 bitlen) { return (__force u32)cpu_to_le32(~0U >> (sizeof(u32) * BITS_PER_BYTE - bitlen)); } static void nft_cmp16_fast_mask(struct nft_data *data, unsigned int bitlen) { int len = bitlen / BITS_PER_BYTE; int i, words = len / sizeof(u32); for (i = 0; i < words; i++) { data->data[i] = 0xffffffff; bitlen -= sizeof(u32) * BITS_PER_BYTE; } if (len % sizeof(u32)) data->data[i++] = nft_cmp_mask(bitlen); for (; i < 4; i++) data->data[i] = 0; } static int nft_cmp16_fast_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr); struct nft_data_desc desc = { .type = NFT_DATA_VALUE, .size = sizeof(priv->data), }; int err; err = nft_data_init(NULL, &priv->data, &desc, tb[NFTA_CMP_DATA]); if (err < 0) return err; err = nft_parse_register_load(ctx, tb[NFTA_CMP_SREG], &priv->sreg, desc.len); if (err < 0) return err; nft_cmp16_fast_mask(&priv->mask, desc.len * BITS_PER_BYTE); priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ; priv->len = desc.len; return 0; } static int nft_cmp16_fast_offload(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr) { const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr); struct nft_cmp_expr cmp = { .data = priv->data, .sreg = priv->sreg, .len = priv->len, .op = priv->inv ? 
NFT_CMP_NEQ : NFT_CMP_EQ, }; return __nft_cmp_offload(ctx, flow, &cmp); } static int nft_cmp16_fast_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { const struct nft_cmp16_fast_expr *priv = nft_expr_priv(expr); enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ; if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op))) goto nla_put_failure; if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data, NFT_DATA_VALUE, priv->len) < 0) goto nla_put_failure; return 0; nla_put_failure: return -1; } const struct nft_expr_ops nft_cmp16_fast_ops = { .type = &nft_cmp_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_cmp16_fast_expr)), .eval = NULL, /* inlined */ .init = nft_cmp16_fast_init, .dump = nft_cmp16_fast_dump, .reduce = NFT_REDUCE_READONLY, .offload = nft_cmp16_fast_offload, }; static const struct nft_expr_ops * nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_data data; struct nft_data_desc desc = { .type = NFT_DATA_VALUE, .size = sizeof(data), }; enum nft_cmp_ops op; u8 sreg; int err; if (tb[NFTA_CMP_SREG] == NULL || tb[NFTA_CMP_OP] == NULL || tb[NFTA_CMP_DATA] == NULL) return ERR_PTR(-EINVAL); op = ntohl(nla_get_be32(tb[NFTA_CMP_OP])); switch (op) { case NFT_CMP_EQ: case NFT_CMP_NEQ: case NFT_CMP_LT: case NFT_CMP_LTE: case NFT_CMP_GT: case NFT_CMP_GTE: break; default: return ERR_PTR(-EINVAL); } err = nft_data_init(NULL, &data, &desc, tb[NFTA_CMP_DATA]); if (err < 0) return ERR_PTR(err); sreg = ntohl(nla_get_be32(tb[NFTA_CMP_SREG])); if (op == NFT_CMP_EQ || op == NFT_CMP_NEQ) { if (desc.len <= sizeof(u32)) return &nft_cmp_fast_ops; else if (desc.len <= sizeof(data) && ((sreg >= NFT_REG_1 && sreg <= NFT_REG_4) || (sreg >= NFT_REG32_00 && sreg <= NFT_REG32_12 && sreg % 2 == 0))) return &nft_cmp16_fast_ops; } return &nft_cmp_ops; } struct nft_expr_type nft_cmp_type __read_mostly = { .name = "cmp", .select_ops = nft_cmp_select_ops, .policy = nft_cmp_policy, .maxattr = NFTA_CMP_MAX, .owner = THIS_MODULE, };
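The fast-path expressions above reduce an equality comparison to a masked 32-bit test; nft_cmp_fast_mask() builds the mask in little-endian form so that, viewed as bytes, it always selects the first desc.len bytes of the register word regardless of host endianness. The following standalone sketch reproduces that mask computation and the resulting "(reg & mask) == data" test; htole32() stands in for the kernel's cpu_to_le32(), and the values in main() are arbitrary examples.

/*
 * Standalone sketch (not kernel code): the little-endian mask trick used by
 * nft_cmp_fast_mask() and the inlined "(reg & mask) == data" fast path.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t cmp_fast_mask(unsigned int len_bits)
{
    /* len_bits is 8, 16, 24 or 32 on the fast path (never 0). */
    return htole32(~0U >> (32 - len_bits));
}

static int cmp_fast_eq(uint32_t reg, uint32_t data, unsigned int len_bits)
{
    uint32_t mask = cmp_fast_mask(len_bits);

    return (reg & mask) == (data & mask);
}

int main(void)
{
    /* On a little-endian host cmp_fast_mask(16) is 0x0000ffff, so these
     * values compare equal in their low 16 bits.  On big-endian the mask
     * is byte-swapped so that it still covers the first two bytes of the
     * register word as laid out in memory. */
    uint32_t reg  = 0xdeadbeef;
    uint32_t data = 0xffffbeef;

    printf("mask(16)=0x%08x equal=%d\n",
           cmp_fast_mask(16), cmp_fast_eq(reg, data, 16));
    return 0;
}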
// SPDX-License-Identifier: GPL-2.0-only /* * mac80211 debugfs for wireless PHYs * * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2018 - 2019, 2021-2024 Intel Corporation */ #include <linux/debugfs.h> #include <linux/rtnetlink.h> #include <linux/vmalloc.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "rate.h" #include "debugfs.h" #define DEBUGFS_FORMAT_BUFFER_SIZE 100 int mac80211_format_buffer(char __user *userbuf, size_t count, loff_t *ppos, char *fmt, ...) { va_list args; char buf[DEBUGFS_FORMAT_BUFFER_SIZE]; int res; va_start(args, fmt); res = vscnprintf(buf, sizeof(buf), fmt, args); va_end(args); return simple_read_from_buffer(userbuf, count, ppos, buf, res); } #define DEBUGFS_READONLY_FILE_FN(name, fmt, value...)
\ static ssize_t name## _read(struct file *file, char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ struct ieee80211_local *local = file->private_data; \ \ return mac80211_format_buffer(userbuf, count, ppos, \ fmt "\n", ##value); \ } #define DEBUGFS_READONLY_FILE_OPS(name) \ static const struct debugfs_short_fops name## _ops = { \ .read = name## _read, \ .llseek = generic_file_llseek, \ }; #define DEBUGFS_READONLY_FILE(name, fmt, value...) \ DEBUGFS_READONLY_FILE_FN(name, fmt, value) \ DEBUGFS_READONLY_FILE_OPS(name) #define DEBUGFS_ADD(name) \ debugfs_create_file(#name, 0400, phyd, local, &name## _ops) #define DEBUGFS_ADD_MODE(name, mode) \ debugfs_create_file(#name, mode, phyd, local, &name## _ops); DEBUGFS_READONLY_FILE(hw_conf, "%x", local->hw.conf.flags); DEBUGFS_READONLY_FILE(user_power, "%d", local->user_power_level); DEBUGFS_READONLY_FILE(power, "%d", local->hw.conf.power_level); DEBUGFS_READONLY_FILE(total_ps_buffered, "%d", local->total_ps_buffered); DEBUGFS_READONLY_FILE(wep_iv, "%#08x", local->wep_iv & 0xffffff); DEBUGFS_READONLY_FILE(rate_ctrl_alg, "%s", local->rate_ctrl ? local->rate_ctrl->ops->name : "hw/driver"); static ssize_t aqm_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; struct fq *fq = &local->fq; char buf[200]; int len = 0; spin_lock_bh(&local->fq.lock); rcu_read_lock(); len = scnprintf(buf, sizeof(buf), "access name value\n" "R fq_flows_cnt %u\n" "R fq_backlog %u\n" "R fq_overlimit %u\n" "R fq_overmemory %u\n" "R fq_collisions %u\n" "R fq_memory_usage %u\n" "RW fq_memory_limit %u\n" "RW fq_limit %u\n" "RW fq_quantum %u\n", fq->flows_cnt, fq->backlog, fq->overmemory, fq->overlimit, fq->collisions, fq->memory_usage, fq->memory_limit, fq->limit, fq->quantum); rcu_read_unlock(); spin_unlock_bh(&local->fq.lock); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t aqm_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[100]; if (count >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, user_buf, count)) return -EFAULT; if (count && buf[count - 1] == '\n') buf[count - 1] = '\0'; else buf[count] = '\0'; if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1) return count; else if (sscanf(buf, "fq_memory_limit %u", &local->fq.memory_limit) == 1) return count; else if (sscanf(buf, "fq_quantum %u", &local->fq.quantum) == 1) return count; return -EINVAL; } static const struct debugfs_short_fops aqm_ops = { .write = aqm_write, .read = aqm_read, .llseek = default_llseek, }; static ssize_t airtime_flags_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[128] = {}, *pos, *end; pos = buf; end = pos + sizeof(buf) - 1; if (local->airtime_flags & AIRTIME_USE_TX) pos += scnprintf(pos, end - pos, "AIRTIME_TX\t(%lx)\n", AIRTIME_USE_TX); if (local->airtime_flags & AIRTIME_USE_RX) pos += scnprintf(pos, end - pos, "AIRTIME_RX\t(%lx)\n", AIRTIME_USE_RX); return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); } static ssize_t airtime_flags_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[16]; if (count >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, user_buf, count)) return -EFAULT; if (count && buf[count - 1] == '\n') buf[count - 1] = '\0'; else buf[count] = '\0'; if 
(kstrtou16(buf, 0, &local->airtime_flags)) return -EINVAL; return count; } static const struct debugfs_short_fops airtime_flags_ops = { .write = airtime_flags_write, .read = airtime_flags_read, .llseek = default_llseek, }; static ssize_t aql_pending_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[400]; int len = 0; len = scnprintf(buf, sizeof(buf), "AC AQL pending\n" "VO %u us\n" "VI %u us\n" "BE %u us\n" "BK %u us\n" "total %u us\n", atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_VO]), atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_VI]), atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_BE]), atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_BK]), atomic_read(&local->aql_total_pending_airtime)); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static const struct debugfs_short_fops aql_pending_ops = { .read = aql_pending_read, .llseek = default_llseek, }; static ssize_t aql_txq_limit_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[400]; int len = 0; len = scnprintf(buf, sizeof(buf), "AC AQL limit low AQL limit high\n" "VO %u %u\n" "VI %u %u\n" "BE %u %u\n" "BK %u %u\n", local->aql_txq_limit_low[IEEE80211_AC_VO], local->aql_txq_limit_high[IEEE80211_AC_VO], local->aql_txq_limit_low[IEEE80211_AC_VI], local->aql_txq_limit_high[IEEE80211_AC_VI], local->aql_txq_limit_low[IEEE80211_AC_BE], local->aql_txq_limit_high[IEEE80211_AC_BE], local->aql_txq_limit_low[IEEE80211_AC_BK], local->aql_txq_limit_high[IEEE80211_AC_BK]); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t aql_txq_limit_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[100]; u32 ac, q_limit_low, q_limit_high, q_limit_low_old, q_limit_high_old; struct sta_info *sta; if (count >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, user_buf, count)) return -EFAULT; if (count && buf[count - 1] == '\n') buf[count - 1] = '\0'; else buf[count] = '\0'; if (sscanf(buf, "%u %u %u", &ac, &q_limit_low, &q_limit_high) != 3) return -EINVAL; if (ac >= IEEE80211_NUM_ACS) return -EINVAL; q_limit_low_old = local->aql_txq_limit_low[ac]; q_limit_high_old = local->aql_txq_limit_high[ac]; wiphy_lock(local->hw.wiphy); local->aql_txq_limit_low[ac] = q_limit_low; local->aql_txq_limit_high[ac] = q_limit_high; list_for_each_entry(sta, &local->sta_list, list) { /* If a sta has customized queue limits, keep it */ if (sta->airtime[ac].aql_limit_low == q_limit_low_old && sta->airtime[ac].aql_limit_high == q_limit_high_old) { sta->airtime[ac].aql_limit_low = q_limit_low; sta->airtime[ac].aql_limit_high = q_limit_high; } } wiphy_unlock(local->hw.wiphy); return count; } static const struct debugfs_short_fops aql_txq_limit_ops = { .write = aql_txq_limit_write, .read = aql_txq_limit_read, .llseek = default_llseek, }; static ssize_t aql_enable_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[3]; int len; len = scnprintf(buf, sizeof(buf), "%d\n", !static_key_false(&aql_disable.key)); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t aql_enable_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { bool aql_disabled = static_key_false(&aql_disable.key); char buf[3]; size_t len; if (count > sizeof(buf)) return -EINVAL; if 
(copy_from_user(buf, user_buf, count)) return -EFAULT; buf[sizeof(buf) - 1] = '\0'; len = strlen(buf); if (len > 0 && buf[len - 1] == '\n') buf[len - 1] = 0; if (buf[0] == '0' && buf[1] == '\0') { if (!aql_disabled) static_branch_inc(&aql_disable); } else if (buf[0] == '1' && buf[1] == '\0') { if (aql_disabled) static_branch_dec(&aql_disable); } else { return -EINVAL; } return count; } static const struct debugfs_short_fops aql_enable_ops = { .write = aql_enable_write, .read = aql_enable_read, .llseek = default_llseek, }; static ssize_t force_tx_status_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[3]; int len = 0; len = scnprintf(buf, sizeof(buf), "%d\n", (int)local->force_tx_status); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t force_tx_status_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; char buf[3]; if (count >= sizeof(buf)) return -EINVAL; if (copy_from_user(buf, user_buf, count)) return -EFAULT; if (count && buf[count - 1] == '\n') buf[count - 1] = '\0'; else buf[count] = '\0'; if (buf[0] == '0' && buf[1] == '\0') local->force_tx_status = 0; else if (buf[0] == '1' && buf[1] == '\0') local->force_tx_status = 1; else return -EINVAL; return count; } static const struct debugfs_short_fops force_tx_status_ops = { .write = force_tx_status_write, .read = force_tx_status_read, .llseek = default_llseek, }; #ifdef CONFIG_PM static ssize_t reset_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; int ret; rtnl_lock(); wiphy_lock(local->hw.wiphy); __ieee80211_suspend(&local->hw, NULL); ret = __ieee80211_resume(&local->hw); wiphy_unlock(local->hw.wiphy); if (ret) cfg80211_shutdown_all_interfaces(local->hw.wiphy); rtnl_unlock(); return count; } static const struct debugfs_short_fops reset_ops = { .write = reset_write, .llseek = noop_llseek, }; #endif static const char *hw_flag_names[] = { #define FLAG(F) [IEEE80211_HW_##F] = #F FLAG(HAS_RATE_CONTROL), FLAG(RX_INCLUDES_FCS), FLAG(HOST_BROADCAST_PS_BUFFERING), FLAG(SIGNAL_UNSPEC), FLAG(SIGNAL_DBM), FLAG(NEED_DTIM_BEFORE_ASSOC), FLAG(SPECTRUM_MGMT), FLAG(AMPDU_AGGREGATION), FLAG(SUPPORTS_PS), FLAG(PS_NULLFUNC_STACK), FLAG(SUPPORTS_DYNAMIC_PS), FLAG(MFP_CAPABLE), FLAG(WANT_MONITOR_VIF), FLAG(NO_VIRTUAL_MONITOR), FLAG(NO_AUTO_VIF), FLAG(SW_CRYPTO_CONTROL), FLAG(SUPPORT_FAST_XMIT), FLAG(REPORTS_TX_ACK_STATUS), FLAG(CONNECTION_MONITOR), FLAG(QUEUE_CONTROL), FLAG(SUPPORTS_PER_STA_GTK), FLAG(AP_LINK_PS), FLAG(TX_AMPDU_SETUP_IN_HW), FLAG(SUPPORTS_RC_TABLE), FLAG(P2P_DEV_ADDR_FOR_INTF), FLAG(TIMING_BEACON_ONLY), FLAG(SUPPORTS_HT_CCK_RATES), FLAG(CHANCTX_STA_CSA), FLAG(SUPPORTS_CLONED_SKBS), FLAG(SINGLE_SCAN_ON_ALL_BANDS), FLAG(TDLS_WIDER_BW), FLAG(SUPPORTS_AMSDU_IN_AMPDU), FLAG(BEACON_TX_STATUS), FLAG(NEEDS_UNIQUE_STA_ADDR), FLAG(SUPPORTS_REORDERING_BUFFER), FLAG(USES_RSS), FLAG(TX_AMSDU), FLAG(TX_FRAG_LIST), FLAG(REPORTS_LOW_ACK), FLAG(SUPPORTS_TX_FRAG), FLAG(SUPPORTS_TDLS_BUFFER_STA), FLAG(DOESNT_SUPPORT_QOS_NDP), FLAG(BUFF_MMPDU_TXQ), FLAG(SUPPORTS_VHT_EXT_NSS_BW), FLAG(STA_MMPDU_TXQ), FLAG(TX_STATUS_NO_AMPDU_LEN), FLAG(SUPPORTS_MULTI_BSSID), FLAG(SUPPORTS_ONLY_HE_MULTI_BSSID), FLAG(AMPDU_KEYBORDER_SUPPORT), FLAG(SUPPORTS_TX_ENCAP_OFFLOAD), FLAG(SUPPORTS_RX_DECAP_OFFLOAD), FLAG(SUPPORTS_CONC_MON_RX_DECAP), FLAG(DETECTS_COLOR_COLLISION), 
FLAG(MLO_MCAST_MULTI_LINK_TX), FLAG(DISALLOW_PUNCTURING), FLAG(DISALLOW_PUNCTURING_5GHZ), FLAG(HANDLES_QUIET_CSA), #undef FLAG }; static ssize_t hwflags_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; size_t bufsz = 30 * NUM_IEEE80211_HW_FLAGS; char *buf = kzalloc(bufsz, GFP_KERNEL); char *pos = buf, *end = buf + bufsz - 1; ssize_t rv; int i; if (!buf) return -ENOMEM; /* fail compilation if somebody adds or removes * a flag without updating the name array above */ BUILD_BUG_ON(ARRAY_SIZE(hw_flag_names) != NUM_IEEE80211_HW_FLAGS); for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) { if (test_bit(i, local->hw.flags)) pos += scnprintf(pos, end - pos, "%s\n", hw_flag_names[i]); } rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); kfree(buf); return rv; } static ssize_t misc_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; /* Max len of each line is 16 characters, plus 9 for 'pending:\n' */ size_t bufsz = IEEE80211_MAX_QUEUES * 16 + 9; char *buf; char *pos, *end; ssize_t rv; int i; int ln; buf = kzalloc(bufsz, GFP_KERNEL); if (!buf) return -ENOMEM; pos = buf; end = buf + bufsz - 1; pos += scnprintf(pos, end - pos, "pending:\n"); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { ln = skb_queue_len(&local->pending[i]); pos += scnprintf(pos, end - pos, "[%i] %d\n", i, ln); } rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); kfree(buf); return rv; } static ssize_t queues_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct ieee80211_local *local = file->private_data; unsigned long flags; char buf[IEEE80211_MAX_QUEUES * 20]; int q, res = 0; spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (q = 0; q < local->hw.queues; q++) res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, local->queue_stop_reasons[q], skb_queue_len(&local->pending[q])); spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); return simple_read_from_buffer(user_buf, count, ppos, buf, res); } DEBUGFS_READONLY_FILE_OPS(hwflags); DEBUGFS_READONLY_FILE_OPS(queues); DEBUGFS_READONLY_FILE_OPS(misc); /* statistics stuff */ static ssize_t format_devstat_counter(struct ieee80211_local *local, char __user *userbuf, size_t count, loff_t *ppos, int (*printvalue)(struct ieee80211_low_level_stats *stats, char *buf, int buflen)) { struct ieee80211_low_level_stats stats; char buf[20]; int res; wiphy_lock(local->hw.wiphy); res = drv_get_stats(local, &stats); wiphy_unlock(local->hw.wiphy); if (res) return res; res = printvalue(&stats, buf, sizeof(buf)); return simple_read_from_buffer(userbuf, count, ppos, buf, res); } #define DEBUGFS_DEVSTATS_FILE(name) \ static int print_devstats_##name(struct ieee80211_low_level_stats *stats,\ char *buf, int buflen) \ { \ return scnprintf(buf, buflen, "%u\n", stats->name); \ } \ static ssize_t stats_ ##name## _read(struct file *file, \ char __user *userbuf, \ size_t count, loff_t *ppos) \ { \ return format_devstat_counter(file->private_data, \ userbuf, \ count, \ ppos, \ print_devstats_##name); \ } \ \ static const struct debugfs_short_fops stats_ ##name## _ops = { \ .read = stats_ ##name## _read, \ .llseek = generic_file_llseek, \ }; #ifdef CONFIG_MAC80211_DEBUG_COUNTERS #define DEBUGFS_STATS_ADD(name) \ debugfs_create_u32(#name, 0400, statsd, &local->name); #endif #define DEBUGFS_DEVSTATS_ADD(name) \ debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); 
DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); void debugfs_hw_add(struct ieee80211_local *local) { struct dentry *phyd = local->hw.wiphy->debugfsdir; struct dentry *statsd; if (!phyd) return; local->debugfs.keys = debugfs_create_dir("keys", phyd); DEBUGFS_ADD(total_ps_buffered); DEBUGFS_ADD(wep_iv); DEBUGFS_ADD(rate_ctrl_alg); DEBUGFS_ADD(queues); DEBUGFS_ADD(misc); #ifdef CONFIG_PM DEBUGFS_ADD_MODE(reset, 0200); #endif DEBUGFS_ADD(hwflags); DEBUGFS_ADD(user_power); DEBUGFS_ADD(power); DEBUGFS_ADD(hw_conf); DEBUGFS_ADD_MODE(force_tx_status, 0600); DEBUGFS_ADD_MODE(aql_enable, 0600); DEBUGFS_ADD(aql_pending); DEBUGFS_ADD_MODE(aqm, 0600); DEBUGFS_ADD_MODE(airtime_flags, 0600); DEBUGFS_ADD(aql_txq_limit); debugfs_create_u32("aql_threshold", 0600, phyd, &local->aql_threshold); statsd = debugfs_create_dir("statistics", phyd); #ifdef CONFIG_MAC80211_DEBUG_COUNTERS DEBUGFS_STATS_ADD(dot11TransmittedFragmentCount); DEBUGFS_STATS_ADD(dot11MulticastTransmittedFrameCount); DEBUGFS_STATS_ADD(dot11FailedCount); DEBUGFS_STATS_ADD(dot11RetryCount); DEBUGFS_STATS_ADD(dot11MultipleRetryCount); DEBUGFS_STATS_ADD(dot11FrameDuplicateCount); DEBUGFS_STATS_ADD(dot11ReceivedFragmentCount); DEBUGFS_STATS_ADD(dot11MulticastReceivedFrameCount); DEBUGFS_STATS_ADD(dot11TransmittedFrameCount); DEBUGFS_STATS_ADD(tx_handlers_drop); DEBUGFS_STATS_ADD(tx_handlers_queued); DEBUGFS_STATS_ADD(tx_handlers_drop_wep); DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); DEBUGFS_STATS_ADD(rx_handlers_drop); DEBUGFS_STATS_ADD(rx_handlers_queued); DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); DEBUGFS_STATS_ADD(tx_expand_skb_head); DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); DEBUGFS_STATS_ADD(rx_expand_skb_head_defrag); DEBUGFS_STATS_ADD(rx_handlers_fragments); DEBUGFS_STATS_ADD(tx_status_drop); #endif DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount); DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount); DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount); DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount); }
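As an aside on the DEBUGFS_READONLY_FILE() helpers used throughout this file: expanding the macro by hand for one entry gives roughly the following. This is an illustrative expansion of DEBUGFS_READONLY_FILE(user_power, "%d", local->user_power_level), not additional code in the file.

static ssize_t user_power_read(struct file *file, char __user *userbuf,
			       size_t count, loff_t *ppos)
{
	struct ieee80211_local *local = file->private_data;

	/* Format the value and copy it to userspace via
	 * mac80211_format_buffer() defined above.
	 */
	return mac80211_format_buffer(userbuf, count, ppos,
				      "%d" "\n", local->user_power_level);
}

static const struct debugfs_short_fops user_power_ops = {
	.read = user_power_read,
	.llseek = generic_file_llseek,
};

DEBUGFS_ADD(user_power) in debugfs_hw_add() then creates the read-only (0400) debugfs file bound to these ops.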
19 19 18 13 15 16 15 1 4 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 /* * Cryptographic API. * * MD5 Message Digest Algorithm (RFC1321). * * Derived from cryptoapi implementation, originally based on the * public domain implementation written by Colin Plumb in 1993. * * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <crypto/md5.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <asm/byteorder.h> const u8 md5_zero_message_hash[MD5_DIGEST_SIZE] = { 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, }; EXPORT_SYMBOL_GPL(md5_zero_message_hash); #define F1(x, y, z) (z ^ (x & (y ^ z))) #define F2(x, y, z) F1(z, x, y) #define F3(x, y, z) (x ^ y ^ z) #define F4(x, y, z) (y ^ (x | ~z)) #define MD5STEP(f, w, x, y, z, in, s) \ (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x) static void md5_transform(__u32 *hash, __u32 const *in) { u32 a, b, c, d; a = hash[0]; b = hash[1]; c = hash[2]; d = hash[3]; MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); MD5STEP(F2, b, c, d, a, 
in[8] + 0x455a14ed, 20); MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21); MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); hash[0] += a; hash[1] += b; hash[2] += c; hash[3] += d; } static inline void md5_transform_helper(struct md5_state *ctx) { le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); md5_transform(ctx->hash, ctx->block); } static int md5_init(struct shash_desc *desc) { struct md5_state *mctx = shash_desc_ctx(desc); mctx->hash[0] = MD5_H0; mctx->hash[1] = MD5_H1; mctx->hash[2] = MD5_H2; mctx->hash[3] = MD5_H3; mctx->byte_count = 0; return 0; } static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct md5_state *mctx = shash_desc_ctx(desc); const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); return 0; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, avail); md5_transform_helper(mctx); data += avail; len -= avail; while (len >= sizeof(mctx->block)) { memcpy(mctx->block, data, sizeof(mctx->block)); md5_transform_helper(mctx); data += sizeof(mctx->block); len -= sizeof(mctx->block); } memcpy(mctx->block, data, len); return 0; } static int md5_final(struct shash_desc *desc, u8 *out) { struct md5_state *mctx = shash_desc_ctx(desc); const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); *p++ = 0x80; if (padding < 0) { memset(p, 0x00, padding + sizeof (u64)); md5_transform_helper(mctx); p = (char *)mctx->block; padding = 56; } memset(p, 0, padding); mctx->block[14] = mctx->byte_count << 3; mctx->block[15] = mctx->byte_count >> 29; le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - sizeof(u64)) / sizeof(u32)); md5_transform(mctx->hash, mctx->block); cpu_to_le32_array(mctx->hash, 
sizeof(mctx->hash) / sizeof(u32)); memcpy(out, mctx->hash, sizeof(mctx->hash)); memset(mctx, 0, sizeof(*mctx)); return 0; } static int md5_export(struct shash_desc *desc, void *out) { struct md5_state *ctx = shash_desc_ctx(desc); memcpy(out, ctx, sizeof(*ctx)); return 0; } static int md5_import(struct shash_desc *desc, const void *in) { struct md5_state *ctx = shash_desc_ctx(desc); memcpy(ctx, in, sizeof(*ctx)); return 0; } static struct shash_alg alg = { .digestsize = MD5_DIGEST_SIZE, .init = md5_init, .update = md5_update, .final = md5_final, .export = md5_export, .import = md5_import, .descsize = sizeof(struct md5_state), .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name = "md5-generic", .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init md5_mod_init(void) { return crypto_register_shash(&alg); } static void __exit md5_mod_fini(void) { crypto_unregister_shash(&alg); } subsys_initcall(md5_mod_init); module_exit(md5_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MD5 Message Digest Algorithm"); MODULE_ALIAS_CRYPTO("md5");
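The shash algorithm registered above is normally consumed through the kernel crypto API rather than by calling these functions directly. A minimal usage sketch follows; the helper name is hypothetical and a sleeping context is assumed.

#include <crypto/hash.h>
#include <crypto/md5.h>

static int example_md5_digest(const u8 *data, unsigned int len,
			      u8 out[MD5_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int ret;

	/* Look up the "md5" algorithm registered by md5_mod_init() above. */
	tfm = crypto_alloc_shash("md5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One-shot helper that runs init/update/final internally. */
	ret = crypto_shash_tfm_digest(tfm, data, len, out);

	crypto_free_shash(tfm);
	return ret;
}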
1 1 2 1 1 1 1 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 // SPDX-License-Identifier: GPL-2.0-or-later /* * Roccat KonePure driver for Linux * * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net> */ /* */ /* * Roccat KonePure is a smaller version of KoneXTD with less buttons and lights. */ #include <linux/types.h> #include <linux/device.h> #include <linux/input.h> #include <linux/hid.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/hid-roccat.h> #include "hid-ids.h" #include "hid-roccat-common.h" enum { KONEPURE_MOUSE_REPORT_NUMBER_BUTTON = 3, }; struct konepure_mouse_report_button { uint8_t report_number; /* always KONEPURE_MOUSE_REPORT_NUMBER_BUTTON */ uint8_t zero; uint8_t type; uint8_t data1; uint8_t data2; uint8_t zero2; uint8_t unknown[2]; } __packed; ROCCAT_COMMON2_BIN_ATTRIBUTE_W(control, 0x04, 0x03); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(actual_profile, 0x05, 0x03); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_settings, 0x06, 0x1f); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(profile_buttons, 0x07, 0x3b); ROCCAT_COMMON2_BIN_ATTRIBUTE_W(macro, 0x08, 0x0822); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(info, 0x09, 0x06); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(tcu, 0x0c, 0x04); ROCCAT_COMMON2_BIN_ATTRIBUTE_R(tcu_image, 0x0c, 0x0404); ROCCAT_COMMON2_BIN_ATTRIBUTE_RW(sensor, 0x0f, 0x06); ROCCAT_COMMON2_BIN_ATTRIBUTE_W(talk, 0x10, 0x10); static struct bin_attribute *konepure_bin_attrs[] = { &bin_attr_actual_profile, &bin_attr_control, &bin_attr_info, &bin_attr_talk, &bin_attr_macro, &bin_attr_sensor, &bin_attr_tcu, &bin_attr_tcu_image, &bin_attr_profile_settings, &bin_attr_profile_buttons, NULL, }; static const struct attribute_group konepure_group = { .bin_attrs = konepure_bin_attrs, }; static const struct attribute_group *konepure_groups[] = { &konepure_group, NULL, }; static const struct class konepure_class = { .name = "konepure", .dev_groups = konepure_groups, }; static int konepure_init_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct usb_device *usb_dev = interface_to_usbdev(intf); struct roccat_common2_device *konepure; int retval; if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) { hid_set_drvdata(hdev, NULL); return 0; } konepure = kzalloc(sizeof(*konepure), GFP_KERNEL); if (!konepure) { hid_err(hdev, "can't alloc device descriptor\n"); return -ENOMEM; } hid_set_drvdata(hdev, konepure); retval = roccat_common2_device_init_struct(usb_dev, konepure); if (retval) { hid_err(hdev, "couldn't init KonePure device\n"); goto exit_free; } retval = roccat_connect(&konepure_class, hdev, sizeof(struct konepure_mouse_report_button)); if (retval < 0) { hid_err(hdev, "couldn't init char dev\n"); } else { konepure->chrdev_minor = retval; 
konepure->roccat_claimed = 1; } return 0; exit_free: kfree(konepure); return retval; } static void konepure_remove_specials(struct hid_device *hdev) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct roccat_common2_device *konepure; if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) return; konepure = hid_get_drvdata(hdev); if (konepure->roccat_claimed) roccat_disconnect(konepure->chrdev_minor); kfree(konepure); } static int konepure_probe(struct hid_device *hdev, const struct hid_device_id *id) { int retval; if (!hid_is_usb(hdev)) return -EINVAL; retval = hid_parse(hdev); if (retval) { hid_err(hdev, "parse failed\n"); goto exit; } retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (retval) { hid_err(hdev, "hw start failed\n"); goto exit; } retval = konepure_init_specials(hdev); if (retval) { hid_err(hdev, "couldn't install mouse\n"); goto exit_stop; } return 0; exit_stop: hid_hw_stop(hdev); exit: return retval; } static void konepure_remove(struct hid_device *hdev) { konepure_remove_specials(hdev); hid_hw_stop(hdev); } static int konepure_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct usb_interface *intf = to_usb_interface(hdev->dev.parent); struct roccat_common2_device *konepure = hid_get_drvdata(hdev); if (intf->cur_altsetting->desc.bInterfaceProtocol != USB_INTERFACE_PROTOCOL_MOUSE) return 0; if (data[0] != KONEPURE_MOUSE_REPORT_NUMBER_BUTTON) return 0; if (konepure != NULL && konepure->roccat_claimed) roccat_report_event(konepure->chrdev_minor, data); return 0; } static const struct hid_device_id konepure_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) }, { } }; MODULE_DEVICE_TABLE(hid, konepure_devices); static struct hid_driver konepure_driver = { .name = "konepure", .id_table = konepure_devices, .probe = konepure_probe, .remove = konepure_remove, .raw_event = konepure_raw_event }; static int __init konepure_init(void) { int retval; retval = class_register(&konepure_class); if (retval) return retval; retval = hid_register_driver(&konepure_driver); if (retval) class_unregister(&konepure_class); return retval; } static void __exit konepure_exit(void) { hid_unregister_driver(&konepure_driver); class_unregister(&konepure_class); } module_init(konepure_init); module_exit(konepure_exit); MODULE_AUTHOR("Stefan Achatz"); MODULE_DESCRIPTION("USB Roccat KonePure/Optical driver"); MODULE_LICENSE("GPL v2");
/*
 * llc_core.c - Minimum needed routines for sap handling and module init/exit
 *
 * Copyright (c) 1997 by Procom Technology, Inc.
 *		 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program can be redistributed or modified under the terms of the
 * GNU General Public License as published by the Free Software Foundation.
 * This program is distributed without any warranty or implied warranty
 * of merchantability or fitness for a particular purpose.
 *
 * See the GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <net/net_namespace.h>
#include <net/llc.h>

LIST_HEAD(llc_sap_list);
static DEFINE_SPINLOCK(llc_sap_list_lock);

/**
 *	llc_sap_alloc - allocates and initializes sap.
 *
 *	Allocates and initializes sap.
 */
static struct llc_sap *llc_sap_alloc(void)
{
	struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
	int i;

	if (sap) {
		/* sap->laddr.mac - leave as a null, it's filled by bind */
		sap->state = LLC_SAP_STATE_ACTIVE;
		spin_lock_init(&sap->sk_lock);
		for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++)
			INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i);
		refcount_set(&sap->refcnt, 1);
	}
	return sap;
}

static struct llc_sap *__llc_sap_find(unsigned char sap_value)
{
	struct llc_sap *sap;

	list_for_each_entry(sap, &llc_sap_list, node)
		if (sap->laddr.lsap == sap_value)
			goto out;
	sap = NULL;
out:
	return sap;
}

/**
 *	llc_sap_find - searches a SAP in station
 *	@sap_value: sap to be found
 *
 *	Searches for a sap in the sap list of the LLC's station upon the sap ID.
 *	If the sap is found it will be refcounted and the user will have to do
 *	a llc_sap_put after use.
 *	Returns the sap or %NULL if not found.
 */
struct llc_sap *llc_sap_find(unsigned char sap_value)
{
	struct llc_sap *sap;

	rcu_read_lock_bh();
	sap = __llc_sap_find(sap_value);
	if (!sap || !llc_sap_hold_safe(sap))
		sap = NULL;
	rcu_read_unlock_bh();
	return sap;
}

/**
 *	llc_sap_open - open interface to the upper layers.
 *	@lsap: SAP number.
 *	@func: rcv func for datalink protos
 *
 *	Interface function to upper layer. Each one who wants to get a SAP
 *	(for example NetBEUI) should call this function. Returns the opened
 *	SAP for success, NULL for failure.
 */
struct llc_sap *llc_sap_open(unsigned char lsap,
			     int (*func)(struct sk_buff *skb,
					 struct net_device *dev,
					 struct packet_type *pt,
					 struct net_device *orig_dev))
{
	struct llc_sap *sap = NULL;

	spin_lock_bh(&llc_sap_list_lock);
	if (__llc_sap_find(lsap)) /* SAP already exists */
		goto out;

	sap = llc_sap_alloc();
	if (!sap)
		goto out;
	sap->laddr.lsap = lsap;
	sap->rcv_func = func;
	list_add_tail_rcu(&sap->node, &llc_sap_list);
out:
	spin_unlock_bh(&llc_sap_list_lock);
	return sap;
}

/**
 *	llc_sap_close - close interface for upper layers.
 *	@sap: SAP to be closed.
 *
 *	Close interface function to upper layer.
Each one who wants to * close an open SAP (for example NetBEUI) should call this function. * Removes this sap from the list of saps in the station and then * frees the memory for this sap. */ void llc_sap_close(struct llc_sap *sap) { WARN_ON(sap->sk_count); spin_lock_bh(&llc_sap_list_lock); list_del_rcu(&sap->node); spin_unlock_bh(&llc_sap_list_lock); kfree_rcu(sap, rcu); } static struct packet_type llc_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_802_2), .func = llc_rcv, }; static int __init llc_init(void) { dev_add_pack(&llc_packet_type); return 0; } static void __exit llc_exit(void) { dev_remove_pack(&llc_packet_type); } module_init(llc_init); module_exit(llc_exit); EXPORT_SYMBOL(llc_sap_list); EXPORT_SYMBOL(llc_sap_find); EXPORT_SYMBOL(llc_sap_open); EXPORT_SYMBOL(llc_sap_close); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Procom 1997, Jay Schullist 2001, Arnaldo C. Melo 2001-2003"); MODULE_DESCRIPTION("LLC IEEE 802.2 core support");
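A usage sketch of the SAP interface above, in the style of the in-tree 802.2 users. The handler body and SAP value are illustrative only, and llc_sap_put() is assumed to be the inline helper from <net/llc.h> that drops the reference taken here and ends in llc_sap_close().

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* Hand the frame to the upper-layer protocol here. */
	kfree_skb(skb);
	return 0;
}

static struct llc_sap *example_sap;

static int example_register(void)
{
	example_sap = llc_sap_open(0x42, example_rcv);	/* illustrative LSAP */
	return example_sap ? 0 : -EBUSY;
}

static void example_unregister(void)
{
	llc_sap_put(example_sap);	/* last ref dropped -> llc_sap_close() */
}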
3 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 /* IPv6-specific defines for netfilter. * (C)1998 Rusty Russell -- This code is GPL. * (C)1999 David Jeffery * this header was blatantly ripped from netfilter_ipv4.h * it's amazing what adding a bunch of 6s can do =8^) */ #ifndef __LINUX_IP6_NETFILTER_H #define __LINUX_IP6_NETFILTER_H #include <uapi/linux/netfilter_ipv6.h> #include <net/tcp.h> /* Check for an extension */ static inline int nf_ip6_ext_hdr(u8 nexthdr) { return (nexthdr == IPPROTO_HOPOPTS) || (nexthdr == IPPROTO_ROUTING) || (nexthdr == IPPROTO_FRAGMENT) || (nexthdr == IPPROTO_ESP) || (nexthdr == IPPROTO_AH) || (nexthdr == IPPROTO_NONE) || (nexthdr == IPPROTO_DSTOPTS); } /* Extra routing may needed on local out, as the QUEUE target never returns * control to the table. */ struct ip6_rt_info { struct in6_addr daddr; struct in6_addr saddr; u_int32_t mark; }; struct nf_queue_entry; struct nf_bridge_frag_data; /* * Hook functions for ipv6 to allow xt_* modules to be built-in even * if IPv6 is a module. */ struct nf_ipv6_ops { #if IS_MODULE(CONFIG_IPV6) int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*dev_get_saddr)(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); u32 (*cookie_init_sequence)(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); int (*cookie_v6_check)(const struct ipv6hdr *iph, const struct tcphdr *th); #endif void (*route_input)(struct sk_buff *skb); int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, int (*output)(struct net *, struct sock *, struct sk_buff *)); int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); #if IS_MODULE(CONFIG_IPV6) int (*br_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)); #endif }; #ifdef CONFIG_NETFILTER #include <net/addrconf.h> extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return rcu_dereference(nf_ipv6_ops); } static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return 1; return v6_ops->chk_addr(net, addr, dev, strict); #elif IS_BUILTIN(CONFIG_IPV6) return ipv6_chk_addr(net, addr, dev, strict); #else return 1; #endif } int __nf_ip6_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict); static inline int 
nf_ip6_route(struct net *net, struct dst_entry **dst, struct flowi *fl, bool strict) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); if (v6ops) return v6ops->route(net, dst, fl, strict); return -EHOSTUNREACH; #endif #if IS_BUILTIN(CONFIG_IPV6) return __nf_ip6_route(net, dst, fl, strict); #else return -EHOSTUNREACH; #endif } #include <net/netfilter/ipv6/nf_defrag_ipv6.h> int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)); static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct nf_bridge_frag_data *data, int (*output)(struct net *, struct sock *sk, const struct nf_bridge_frag_data *data, struct sk_buff *)) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return 1; return v6_ops->br_fragment(net, sk, skb, data, output); #elif IS_BUILTIN(CONFIG_IPV6) return br_ip6_fragment(net, sk, skb, data, output); #else return 1; #endif } int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (!v6_ops) return -EHOSTUNREACH; return v6_ops->route_me_harder(net, sk, skb); #elif IS_BUILTIN(CONFIG_IPV6) return ip6_route_me_harder(net, sk, skb); #else return -EHOSTUNREACH; #endif } static inline u32 nf_ipv6_cookie_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp) { #if IS_ENABLED(CONFIG_SYN_COOKIES) #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (v6_ops) return v6_ops->cookie_init_sequence(iph, th, mssp); #elif IS_BUILTIN(CONFIG_IPV6) return __cookie_v6_init_sequence(iph, th, mssp); #endif #endif return 0; } static inline int nf_cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th) { #if IS_ENABLED(CONFIG_SYN_COOKIES) #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); if (v6_ops) return v6_ops->cookie_v6_check(iph, th); #elif IS_BUILTIN(CONFIG_IPV6) return __cookie_v6_check(iph, th); #endif #endif return 0; } __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); int nf_ip6_check_hbh_len(struct sk_buff *skb, u32 *plen); int ipv6_netfilter_init(void); void ipv6_netfilter_fini(void); #else /* CONFIG_NETFILTER */ static inline int ipv6_netfilter_init(void) { return 0; } static inline void ipv6_netfilter_fini(void) { return; } static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) { return NULL; } #endif /* CONFIG_NETFILTER */ #endif /*__LINUX_IP6_NETFILTER_H*/
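To illustrate how the indirection above is consumed: a netfilter caller typically builds a flowi6 and goes through nf_ip6_route(), which dispatches either to the built-in __nf_ip6_route() or to the ipv6 module via nf_ipv6_ops. The helper below is hypothetical, with error handling trimmed.

#include <net/flow.h>

static int example_route_lookup(struct net *net, const struct in6_addr *daddr,
				struct dst_entry **dst)
{
	struct flowi6 fl6 = {
		.daddr = *daddr,
	};

	/* false: no strict source-address/interface checking */
	return nf_ip6_route(net, dst, flowi6_to_flowi(&fl6), false);
}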
17 6 58 17 17 17 80 904 905 902 905 903 947 46 905 411 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 // SPDX-License-Identifier: GPL-2.0-only /* -*- linux-c -*- * sysctl_net.c: sysctl interface to net subsystem. * * Begun April 1, 1996, Mike Shaver. * Added /proc/sys/net directories for each protocol family. [MS] * * Revision 1.2 1996/05/08 20:24:40 shaver * Added bits for NET_BRIDGE and the NET_IPV4_ARP stuff and * NET_IPV4_IP_FORWARD. * * */ #include <linux/mm.h> #include <linux/export.h> #include <linux/sysctl.h> #include <linux/nsproxy.h> #include <net/sock.h> #ifdef CONFIG_INET #include <net/ip.h> #endif #ifdef CONFIG_NET #include <linux/if_ether.h> #endif static struct ctl_table_set * net_ctl_header_lookup(struct ctl_table_root *root) { return &current->nsproxy->net_ns->sysctls; } static int is_seen(struct ctl_table_set *set) { return &current->nsproxy->net_ns->sysctls == set; } /* Return standard mode bits for table entry. */ static int net_ctl_permissions(struct ctl_table_header *head, const struct ctl_table *table) { struct net *net = container_of(head->set, struct net, sysctls); /* Allow network administrator to have same access as root. */ if (ns_capable_noaudit(net->user_ns, CAP_NET_ADMIN)) { int mode = (table->mode >> 6) & 7; return (mode << 6) | (mode << 3) | mode; } return table->mode; } static void net_ctl_set_ownership(struct ctl_table_header *head, kuid_t *uid, kgid_t *gid) { struct net *net = container_of(head->set, struct net, sysctls); kuid_t ns_root_uid; kgid_t ns_root_gid; ns_root_uid = make_kuid(net->user_ns, 0); if (uid_valid(ns_root_uid)) *uid = ns_root_uid; ns_root_gid = make_kgid(net->user_ns, 0); if (gid_valid(ns_root_gid)) *gid = ns_root_gid; } static struct ctl_table_root net_sysctl_root = { .lookup = net_ctl_header_lookup, .permissions = net_ctl_permissions, .set_ownership = net_ctl_set_ownership, }; static int __net_init sysctl_net_init(struct net *net) { setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen); return 0; } static void __net_exit sysctl_net_exit(struct net *net) { retire_sysctl_set(&net->sysctls); } static struct pernet_operations sysctl_pernet_ops = { .init = sysctl_net_init, .exit = sysctl_net_exit, }; static struct ctl_table_header *net_header; __init int net_sysctl_init(void) { static struct ctl_table empty[1]; int ret = -ENOMEM; /* Avoid limitations in the sysctl implementation by * registering "/proc/sys/net" as an empty directory not in a * network namespace. */ net_header = register_sysctl_sz("net", empty, 0); if (!net_header) goto out; ret = register_pernet_subsys(&sysctl_pernet_ops); if (ret) goto out1; out: return ret; out1: unregister_sysctl_table(net_header); net_header = NULL; goto out; } /* Verify that sysctls for non-init netns are safe by either: * 1) being read-only, or * 2) having a data pointer which points outside of the global kernel/module * data segment, and rather into the heap where a per-net object was * allocated. 
*/ static void ensure_safe_net_sysctl(struct net *net, const char *path, struct ctl_table *table, size_t table_size) { struct ctl_table *ent; pr_debug("Registering net sysctl (net %p): %s\n", net, path); ent = table; for (size_t i = 0; i < table_size; ent++, i++) { unsigned long addr; const char *where; pr_debug(" procname=%s mode=%o proc_handler=%ps data=%p\n", ent->procname, ent->mode, ent->proc_handler, ent->data); /* If it's not writable inside the netns, then it can't hurt. */ if ((ent->mode & 0222) == 0) { pr_debug(" Not writable by anyone\n"); continue; } /* Where does data point? */ addr = (unsigned long)ent->data; if (is_module_address(addr)) where = "module"; else if (is_kernel_core_data(addr)) where = "kernel"; else continue; /* If it is writable and points to kernel/module global * data, then it's probably a netns leak. */ WARN(1, "sysctl %s/%s: data points to %s global data: %ps\n", path, ent->procname, where, ent->data); /* Make it "safe" by dropping writable perms */ ent->mode &= ~0222; } } struct ctl_table_header *register_net_sysctl_sz(struct net *net, const char *path, struct ctl_table *table, size_t table_size) { if (!net_eq(net, &init_net)) ensure_safe_net_sysctl(net, path, table, table_size); return __register_sysctl_table(&net->sysctls, path, table, table_size); } EXPORT_SYMBOL_GPL(register_net_sysctl_sz); void unregister_net_sysctl_table(struct ctl_table_header *header) { unregister_sysctl_table(header); } EXPORT_SYMBOL_GPL(unregister_net_sysctl_table);
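A sketch of how a subsystem would use register_net_sysctl_sz() above, registering against the initial namespace so the writability check does not apply. Names and the backing variable are hypothetical; per-netns users instead kmemdup() the table and point .data at heap-allocated per-net state, as the comment above ensure_safe_net_sysctl() describes.

static int example_sysctl_value;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_value",
		.data		= &example_sysctl_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};

static struct ctl_table_header *example_hdr;

static int __init example_sysctl_init(void)
{
	example_hdr = register_net_sysctl_sz(&init_net, "net/example",
					     example_table,
					     ARRAY_SIZE(example_table));
	return example_hdr ? 0 : -ENOMEM;
}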
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS volume management
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include "internal.h"

static unsigned __read_mostly afs_volume_record_life = 60 * 60;

static void afs_destroy_volume(struct work_struct *work);

/*
 * Insert a volume into a cell.  If there's an existing volume record, that is
 * returned instead with a ref held.
*/ static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell, struct afs_volume *volume) { struct afs_volume *p; struct rb_node *parent = NULL, **pp; write_seqlock(&cell->volume_lock); pp = &cell->volumes.rb_node; while (*pp) { parent = *pp; p = rb_entry(parent, struct afs_volume, cell_node); if (p->vid < volume->vid) { pp = &(*pp)->rb_left; } else if (p->vid > volume->vid) { pp = &(*pp)->rb_right; } else { if (afs_try_get_volume(p, afs_volume_trace_get_cell_insert)) { volume = p; goto found; } set_bit(AFS_VOLUME_RM_TREE, &volume->flags); rb_replace_node_rcu(&p->cell_node, &volume->cell_node, &cell->volumes); } } rb_link_node_rcu(&volume->cell_node, parent, pp); rb_insert_color(&volume->cell_node, &cell->volumes); hlist_add_head_rcu(&volume->proc_link, &cell->proc_volumes); found: write_sequnlock(&cell->volume_lock); return volume; } static void afs_remove_volume_from_cell(struct afs_volume *volume) { struct afs_cell *cell = volume->cell; if (!hlist_unhashed(&volume->proc_link)) { trace_afs_volume(volume->vid, refcount_read(&cell->ref), afs_volume_trace_remove); write_seqlock(&cell->volume_lock); hlist_del_rcu(&volume->proc_link); if (!test_and_set_bit(AFS_VOLUME_RM_TREE, &volume->flags)) rb_erase(&volume->cell_node, &cell->volumes); write_sequnlock(&cell->volume_lock); } } /* * Allocate a volume record and load it up from a vldb record. */ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, struct afs_vldb_entry *vldb, struct afs_server_list **_slist) { struct afs_server_list *slist; struct afs_volume *volume; int ret = -ENOMEM, i; volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL); if (!volume) goto error_0; volume->vid = vldb->vid[params->type]; volume->update_at = ktime_get_real_seconds() + afs_volume_record_life; volume->cell = afs_get_cell(params->cell, afs_cell_trace_get_vol); volume->type = params->type; volume->type_force = params->force; volume->name_len = vldb->name_len; volume->creation_time = TIME64_MIN; volume->update_time = TIME64_MIN; refcount_set(&volume->ref, 1); INIT_HLIST_NODE(&volume->proc_link); INIT_WORK(&volume->destructor, afs_destroy_volume); rwlock_init(&volume->servers_lock); mutex_init(&volume->volsync_lock); mutex_init(&volume->cb_check_lock); rwlock_init(&volume->cb_v_break_lock); INIT_LIST_HEAD(&volume->open_mmaps); init_rwsem(&volume->open_mmaps_lock); memcpy(volume->name, vldb->name, vldb->name_len + 1); for (i = 0; i < AFS_MAXTYPES; i++) volume->vids[i] = vldb->vid[i]; slist = afs_alloc_server_list(volume, params->key, vldb); if (IS_ERR(slist)) { ret = PTR_ERR(slist); goto error_1; } *_slist = slist; rcu_assign_pointer(volume->servers, slist); trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc); return volume; error_1: afs_put_cell(volume->cell, afs_cell_trace_put_vol); kfree(volume); error_0: return ERR_PTR(ret); } /* * Look up or allocate a volume record. */ static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params, struct afs_vldb_entry *vldb) { struct afs_server_list *slist; struct afs_volume *candidate, *volume; candidate = afs_alloc_volume(params, vldb, &slist); if (IS_ERR(candidate)) return candidate; volume = afs_insert_volume_into_cell(params->cell, candidate); if (volume == candidate) afs_attach_volume_to_servers(volume, slist); else afs_put_volume(candidate, afs_volume_trace_put_cell_dup); return volume; } /* * Look up a VLDB record for a volume. 
*/ static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell, struct key *key, const char *volname, size_t volnamesz) { struct afs_vldb_entry *vldb = ERR_PTR(-EDESTADDRREQ); struct afs_vl_cursor vc; int ret; if (!afs_begin_vlserver_operation(&vc, cell, key)) return ERR_PTR(-ERESTARTSYS); while (afs_select_vlserver(&vc)) { vldb = afs_vl_get_entry_by_name_u(&vc, volname, volnamesz); } ret = afs_end_vlserver_operation(&vc); return ret < 0 ? ERR_PTR(ret) : vldb; } /* * Look up a volume in the VL server and create a candidate volume record for * it. * * The volume name can be one of the following: * "%[cell:]volume[.]" R/W volume * "#[cell:]volume[.]" R/O or R/W volume (rwparent=0), * or R/W (rwparent=1) volume * "%[cell:]volume.readonly" R/O volume * "#[cell:]volume.readonly" R/O volume * "%[cell:]volume.backup" Backup volume * "#[cell:]volume.backup" Backup volume * * The cell name is optional, and defaults to the current cell. * * See "The Rules of Mount Point Traversal" in Chapter 5 of the AFS SysAdmin * Guide * - Rule 1: Explicit type suffix forces access of that type or nothing * (no suffix, then use Rule 2 & 3) * - Rule 2: If parent volume is R/O, then mount R/O volume by preference, R/W * if not available * - Rule 3: If parent volume is R/W, then only mount R/W volume unless * explicitly told otherwise */ struct afs_volume *afs_create_volume(struct afs_fs_context *params) { struct afs_vldb_entry *vldb; struct afs_volume *volume; unsigned long type_mask = 1UL << params->type; vldb = afs_vl_lookup_vldb(params->cell, params->key, params->volname, params->volnamesz); if (IS_ERR(vldb)) return ERR_CAST(vldb); if (test_bit(AFS_VLDB_QUERY_ERROR, &vldb->flags)) { volume = ERR_PTR(vldb->error); goto error; } /* Make the final decision on the type we want */ volume = ERR_PTR(-ENOMEDIUM); if (params->force) { if (!(vldb->flags & type_mask)) goto error; } else if (test_bit(AFS_VLDB_HAS_RO, &vldb->flags)) { params->type = AFSVL_ROVOL; } else if (test_bit(AFS_VLDB_HAS_RW, &vldb->flags)) { params->type = AFSVL_RWVOL; } else { goto error; } volume = afs_lookup_volume(params, vldb); error: kfree(vldb); return volume; } /* * Destroy a volume record */ static void afs_destroy_volume(struct work_struct *work) { struct afs_volume *volume = container_of(work, struct afs_volume, destructor); struct afs_server_list *slist = rcu_access_pointer(volume->servers); _enter("%p", volume); #ifdef CONFIG_AFS_FSCACHE ASSERTCMP(volume->cache, ==, NULL); #endif afs_detach_volume_from_servers(volume, slist); afs_remove_volume_from_cell(volume); afs_put_serverlist(volume->cell->net, slist); afs_put_cell(volume->cell, afs_cell_trace_put_vol); trace_afs_volume(volume->vid, refcount_read(&volume->ref), afs_volume_trace_free); kfree_rcu(volume, rcu); _leave(" [destroyed]"); } /* * Try to get a reference on a volume record. */ bool afs_try_get_volume(struct afs_volume *volume, enum afs_volume_trace reason) { int r; if (__refcount_inc_not_zero(&volume->ref, &r)) { trace_afs_volume(volume->vid, r + 1, reason); return true; } return false; } /* * Get a reference on a volume record. */ struct afs_volume *afs_get_volume(struct afs_volume *volume, enum afs_volume_trace reason) { if (volume) { int r; __refcount_inc(&volume->ref, &r); trace_afs_volume(volume->vid, r + 1, reason); } return volume; } /* * Drop a reference on a volume record. 
*/ void afs_put_volume(struct afs_volume *volume, enum afs_volume_trace reason) { if (volume) { afs_volid_t vid = volume->vid; bool zero; int r; zero = __refcount_dec_and_test(&volume->ref, &r); trace_afs_volume(vid, r - 1, reason); if (zero) schedule_work(&volume->destructor); } } /* * Activate a volume. */ int afs_activate_volume(struct afs_volume *volume) { #ifdef CONFIG_AFS_FSCACHE struct fscache_volume *vcookie; char *name; name = kasprintf(GFP_KERNEL, "afs,%s,%llx", volume->cell->name, volume->vid); if (!name) return -ENOMEM; vcookie = fscache_acquire_volume(name, NULL, NULL, 0); if (IS_ERR(vcookie)) { if (vcookie != ERR_PTR(-EBUSY)) { kfree(name); return PTR_ERR(vcookie); } pr_err("AFS: Cache volume key already in use (%s)\n", name); vcookie = NULL; } volume->cache = vcookie; kfree(name); #endif return 0; } /* * Deactivate a volume. */ void afs_deactivate_volume(struct afs_volume *volume) { _enter("%s", volume->name); #ifdef CONFIG_AFS_FSCACHE fscache_relinquish_volume(volume->cache, NULL, test_bit(AFS_VOLUME_DELETED, &volume->flags)); volume->cache = NULL; #endif _leave(""); } /* * Query the VL service to update the volume status. */ static int afs_update_volume_status(struct afs_volume *volume, struct key *key) { struct afs_server_list *new, *old, *discard; struct afs_vldb_entry *vldb; char idbuf[24]; int ret, idsz; _enter(""); /* We look up an ID by passing it as a decimal string in the * operation's name parameter. */ idsz = snprintf(idbuf, sizeof(idbuf), "%llu", volume->vid); vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz); if (IS_ERR(vldb)) { ret = PTR_ERR(vldb); goto error; } /* See if the volume got renamed. */ if (vldb->name_len != volume->name_len || memcmp(vldb->name, volume->name, vldb->name_len) != 0) { /* TODO: Use RCU'd string. */ memcpy(volume->name, vldb->name, AFS_MAXVOLNAME); volume->name_len = vldb->name_len; } /* See if the volume's server list got updated. */ new = afs_alloc_server_list(volume, key, vldb); if (IS_ERR(new)) { ret = PTR_ERR(new); goto error_vldb; } write_lock(&volume->servers_lock); discard = new; old = rcu_dereference_protected(volume->servers, lockdep_is_held(&volume->servers_lock)); if (afs_annotate_server_list(new, old)) { new->seq = volume->servers_seq + 1; rcu_assign_pointer(volume->servers, new); smp_wmb(); volume->servers_seq++; discard = old; } /* Check more often if replication is ongoing. */ if (new->ro_replicating) volume->update_at = ktime_get_real_seconds() + 10 * 60; else volume->update_at = ktime_get_real_seconds() + afs_volume_record_life; write_unlock(&volume->servers_lock); if (discard == old) afs_reattach_volume_to_servers(volume, new, old); afs_put_serverlist(volume->cell->net, discard); ret = 0; error_vldb: kfree(vldb); error: _leave(" = %d", ret); return ret; } /* * Make sure the volume record is up to date. 
*/ int afs_check_volume_status(struct afs_volume *volume, struct afs_operation *op) { int ret, retries = 0; _enter(""); retry: if (test_bit(AFS_VOLUME_WAIT, &volume->flags)) goto wait; if (volume->update_at <= ktime_get_real_seconds() || test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags)) goto update; _leave(" = 0"); return 0; update: if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) { clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); ret = afs_update_volume_status(volume, op->key); if (ret < 0) set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags); clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags); clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags); wake_up_bit(&volume->flags, AFS_VOLUME_WAIT); _leave(" = %d", ret); return ret; } wait: if (!test_bit(AFS_VOLUME_WAIT, &volume->flags)) { _leave(" = 0 [no wait]"); return 0; } ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, (op->flags & AFS_OPERATION_UNINTR) ? TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE); if (ret == -ERESTARTSYS) { _leave(" = %d", ret); return ret; } retries++; if (retries == 4) { _leave(" = -ESTALE"); return -ESTALE; } goto retry; }