/*
 * memfd_create system call and file sealing support
 *
 * Code was originally included in shmem.c, and broken out to facilitate
 * use by hugetlbfs as well as tmpfs.
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/vfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/khugepaged.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/shmem_fs.h>
#include <linux/memfd.h>
#include <linux/pid_namespace.h>
#include <uapi/linux/memfd.h>

#include "swap.h"

/*
 * We need a tag: a new tag would expand every xa_node by 8 bytes,
 * so reuse a tag which we firmly believe is never set or cleared on tmpfs
 * or hugetlbfs because they are memory only filesystems.
*/ #define MEMFD_TAG_PINNED PAGECACHE_TAG_TOWRITE #define LAST_SCAN 4 /* about 150ms max */ static bool memfd_folio_has_extra_refs(struct folio *folio) { return folio_ref_count(folio) != folio_expected_ref_count(folio); } static void memfd_tag_pins(struct xa_state *xas) { struct folio *folio; int latency = 0; lru_add_drain(); xas_lock_irq(xas); xas_for_each(xas, folio, ULONG_MAX) { if (!xa_is_value(folio) && memfd_folio_has_extra_refs(folio)) xas_set_mark(xas, MEMFD_TAG_PINNED); if (++latency < XA_CHECK_SCHED) continue; latency = 0; xas_pause(xas); xas_unlock_irq(xas); cond_resched(); xas_lock_irq(xas); } xas_unlock_irq(xas); } /* * This is a helper function used by memfd_pin_user_pages() in GUP (gup.c). * It is mainly called to allocate a folio in a memfd when the caller * (memfd_pin_folios()) cannot find a folio in the page cache at a given * index in the mapping. */ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) { #ifdef CONFIG_HUGETLB_PAGE struct folio *folio; gfp_t gfp_mask; if (is_file_hugepages(memfd)) { /* * The folio would most likely be accessed by a DMA driver, * therefore, we have zone memory constraints where we can * alloc from. Also, the folio will be pinned for an indefinite * amount of time, so it is not expected to be migrated away. */ struct inode *inode = file_inode(memfd); struct hstate *h = hstate_file(memfd); int err = -ENOMEM; long nr_resv; gfp_mask = htlb_alloc_mask(h); gfp_mask &= ~(__GFP_HIGHMEM | __GFP_MOVABLE); idx >>= huge_page_order(h); nr_resv = hugetlb_reserve_pages(inode, idx, idx + 1, NULL, 0); if (nr_resv < 0) return ERR_PTR(nr_resv); folio = alloc_hugetlb_folio_reserve(h, numa_node_id(), NULL, gfp_mask); if (folio) { err = hugetlb_add_to_page_cache(folio, memfd->f_mapping, idx); if (err) { folio_put(folio); goto err_unresv; } hugetlb_set_folio_subpool(folio, subpool_inode(inode)); folio_unlock(folio); return folio; } err_unresv: if (nr_resv > 0) hugetlb_unreserve_pages(inode, idx, idx + 1, 0); return ERR_PTR(err); } #endif return shmem_read_folio(memfd->f_mapping, idx); } /* * Setting SEAL_WRITE requires us to verify there's no pending writer. However, * via get_user_pages(), drivers might have some pending I/O without any active * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all folios * and see whether it has an elevated ref-count. If so, we tag them and wait for * them to be dropped. * The caller must guarantee that no new user will acquire writable references * to those folios to avoid races. */ static int memfd_wait_for_pins(struct address_space *mapping) { XA_STATE(xas, &mapping->i_pages, 0); struct folio *folio; int error, scan; memfd_tag_pins(&xas); error = 0; for (scan = 0; scan <= LAST_SCAN; scan++) { int latency = 0; if (!xas_marked(&xas, MEMFD_TAG_PINNED)) break; if (!scan) lru_add_drain_all(); else if (schedule_timeout_killable((HZ << scan) / 200)) scan = LAST_SCAN; xas_set(&xas, 0); xas_lock_irq(&xas); xas_for_each_marked(&xas, folio, ULONG_MAX, MEMFD_TAG_PINNED) { bool clear = true; if (!xa_is_value(folio) && memfd_folio_has_extra_refs(folio)) { /* * On the last scan, we clean up all those tags * we inserted; but make a note that we still * found folios pinned. 
*/ if (scan == LAST_SCAN) error = -EBUSY; else clear = false; } if (clear) xas_clear_mark(&xas, MEMFD_TAG_PINNED); if (++latency < XA_CHECK_SCHED) continue; latency = 0; xas_pause(&xas); xas_unlock_irq(&xas); cond_resched(); xas_lock_irq(&xas); } xas_unlock_irq(&xas); } return error; } static unsigned int *memfd_file_seals_ptr(struct file *file) { if (shmem_file(file)) return &SHMEM_I(file_inode(file))->seals; #ifdef CONFIG_HUGETLBFS if (is_file_hugepages(file)) return &HUGETLBFS_I(file_inode(file))->seals; #endif return NULL; } #define F_ALL_SEALS (F_SEAL_SEAL | \ F_SEAL_EXEC | \ F_SEAL_SHRINK | \ F_SEAL_GROW | \ F_SEAL_WRITE | \ F_SEAL_FUTURE_WRITE) static int memfd_add_seals(struct file *file, unsigned int seals) { struct inode *inode = file_inode(file); unsigned int *file_seals; int error; /* * SEALING * Sealing allows multiple parties to share a tmpfs or hugetlbfs file * but restrict access to a specific subset of file operations. Seals * can only be added, but never removed. This way, mutually untrusted * parties can share common memory regions with a well-defined policy. * A malicious peer can thus never perform unwanted operations on a * shared object. * * Seals are only supported on special tmpfs or hugetlbfs files and * always affect the whole underlying inode. Once a seal is set, it * may prevent some kinds of access to the file. Currently, the * following seals are defined: * SEAL_SEAL: Prevent further seals from being set on this file * SEAL_SHRINK: Prevent the file from shrinking * SEAL_GROW: Prevent the file from growing * SEAL_WRITE: Prevent write access to the file * SEAL_EXEC: Prevent modification of the exec bits in the file mode * * As we don't require any trust relationship between two parties, we * must prevent seals from being removed. Therefore, sealing a file * only adds a given set of seals to the file, it never touches * existing seals. Furthermore, the "setting seals"-operation can be * sealed itself, which basically prevents any further seal from being * added. * * Semantics of sealing are only defined on volatile files. Only * anonymous tmpfs and hugetlbfs files support sealing. More * importantly, seals are never written to disk. Therefore, there's * no plan to support it on other file types. */ if (!(file->f_mode & FMODE_WRITE)) return -EPERM; if (seals & ~(unsigned int)F_ALL_SEALS) return -EINVAL; inode_lock(inode); file_seals = memfd_file_seals_ptr(file); if (!file_seals) { error = -EINVAL; goto unlock; } if (*file_seals & F_SEAL_SEAL) { error = -EPERM; goto unlock; } if ((seals & F_SEAL_WRITE) && !(*file_seals & F_SEAL_WRITE)) { error = mapping_deny_writable(file->f_mapping); if (error) goto unlock; error = memfd_wait_for_pins(file->f_mapping); if (error) { mapping_allow_writable(file->f_mapping); goto unlock; } } /* * SEAL_EXEC implies SEAL_WRITE, making W^X from the start. */ if (seals & F_SEAL_EXEC && inode->i_mode & 0111) seals |= F_SEAL_SHRINK|F_SEAL_GROW|F_SEAL_WRITE|F_SEAL_FUTURE_WRITE; *file_seals |= seals; error = 0; unlock: inode_unlock(inode); return error; } static int memfd_get_seals(struct file *file) { unsigned int *seals = memfd_file_seals_ptr(file); return seals ? 
*seals : -EINVAL; } long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg) { long error; switch (cmd) { case F_ADD_SEALS: error = memfd_add_seals(file, arg); break; case F_GET_SEALS: error = memfd_get_seals(file); break; default: error = -EINVAL; break; } return error; } #define MFD_NAME_PREFIX "memfd:" #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1) #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN) #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING | MFD_HUGETLB | MFD_NOEXEC_SEAL | MFD_EXEC) static int check_sysctl_memfd_noexec(unsigned int *flags) { #ifdef CONFIG_SYSCTL struct pid_namespace *ns = task_active_pid_ns(current); int sysctl = pidns_memfd_noexec_scope(ns); if (!(*flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) { if (sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL) *flags |= MFD_NOEXEC_SEAL; else *flags |= MFD_EXEC; } if (!(*flags & MFD_NOEXEC_SEAL) && sysctl >= MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED) { pr_err_ratelimited( "%s[%d]: memfd_create() requires MFD_NOEXEC_SEAL with vm.memfd_noexec=%d\n", current->comm, task_pid_nr(current), sysctl); return -EACCES; } #endif return 0; } static inline bool is_write_sealed(unsigned int seals) { return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE); } static int check_write_seal(vm_flags_t *vm_flags_ptr) { vm_flags_t vm_flags = *vm_flags_ptr; vm_flags_t mask = vm_flags & (VM_SHARED | VM_WRITE); /* If a private mapping then writability is irrelevant. */ if (!(mask & VM_SHARED)) return 0; /* * New PROT_WRITE and MAP_SHARED mmaps are not allowed when * write seals are active. */ if (mask & VM_WRITE) return -EPERM; /* * This is a read-only mapping, disallow mprotect() from making a * write-sealed mapping writable in future. */ *vm_flags_ptr &= ~VM_MAYWRITE; return 0; } int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr) { int err = 0; unsigned int *seals_ptr = memfd_file_seals_ptr(file); unsigned int seals = seals_ptr ? *seals_ptr : 0; if (is_write_sealed(seals)) err = check_write_seal(vm_flags_ptr); return err; } static int sanitize_flags(unsigned int *flags_ptr) { unsigned int flags = *flags_ptr; if (!(flags & MFD_HUGETLB)) { if (flags & ~(unsigned int)MFD_ALL_FLAGS) return -EINVAL; } else { /* Allow huge page size encoding in flags. 
*/ if (flags & ~(unsigned int)(MFD_ALL_FLAGS | (MFD_HUGE_MASK << MFD_HUGE_SHIFT))) return -EINVAL; } /* Invalid if both EXEC and NOEXEC_SEAL are set.*/ if ((flags & MFD_EXEC) && (flags & MFD_NOEXEC_SEAL)) return -EINVAL; return check_sysctl_memfd_noexec(flags_ptr); } static char *alloc_name(const char __user *uname) { int error; char *name; long len; name = kmalloc(NAME_MAX + 1, GFP_KERNEL); if (!name) return ERR_PTR(-ENOMEM); memcpy(name, MFD_NAME_PREFIX, MFD_NAME_PREFIX_LEN); /* returned length does not include terminating zero */ len = strncpy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, MFD_NAME_MAX_LEN + 1); if (len < 0) { error = -EFAULT; goto err_name; } else if (len > MFD_NAME_MAX_LEN) { error = -EINVAL; goto err_name; } return name; err_name: kfree(name); return ERR_PTR(error); } static struct file *alloc_file(const char *name, unsigned int flags) { unsigned int *file_seals; struct file *file; if (flags & MFD_HUGETLB) { file = hugetlb_file_setup(name, 0, VM_NORESERVE, HUGETLB_ANONHUGE_INODE, (flags >> MFD_HUGE_SHIFT) & MFD_HUGE_MASK); } else { file = shmem_file_setup(name, 0, VM_NORESERVE); } if (IS_ERR(file)) return file; file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; file->f_flags |= O_LARGEFILE; if (flags & MFD_NOEXEC_SEAL) { struct inode *inode = file_inode(file); inode->i_mode &= ~0111; file_seals = memfd_file_seals_ptr(file); if (file_seals) { *file_seals &= ~F_SEAL_SEAL; *file_seals |= F_SEAL_EXEC; } } else if (flags & MFD_ALLOW_SEALING) { /* MFD_EXEC and MFD_ALLOW_SEALING are set */ file_seals = memfd_file_seals_ptr(file); if (file_seals) *file_seals &= ~F_SEAL_SEAL; } return file; } SYSCALL_DEFINE2(memfd_create, const char __user *, uname, unsigned int, flags) { struct file *file; int fd, error; char *name; error = sanitize_flags(&flags); if (error < 0) return error; name = alloc_name(uname); if (IS_ERR(name)) return PTR_ERR(name); fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0); if (fd < 0) { error = fd; goto err_free_name; } file = alloc_file(name, flags); if (IS_ERR(file)) { error = PTR_ERR(file); goto err_free_fd; } fd_install(fd, file); kfree(name); return fd; err_free_fd: put_unused_fd(fd); err_free_name: kfree(name); return error; } |
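The sealing semantics enforced by memfd_add_seals() above are easiest to see from userspace. The following is a minimal, illustrative sketch (not part of the kernel sources); it assumes a libc that exposes memfd_create() and the F_ADD_SEALS/F_GET_SEALS fcntl commands. Note that F_SEAL_WRITE is refused with -EBUSY while writable shared mappings exist, which is why the example unmaps before sealing.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Anonymous, sealable file living in tmpfs. */
	int fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "hello");

	/* Drop the writable mapping, otherwise F_SEAL_WRITE is refused (-EBUSY). */
	munmap(p, 4096);

	/* Freeze size and contents, then forbid adding further seals. */
	if (fcntl(fd, F_ADD_SEALS,
		  F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) < 0) {
		perror("F_ADD_SEALS");
		return 1;
	}

	printf("seals: 0x%x\n", fcntl(fd, F_GET_SEALS));
	return 0;
}

After sealing, another process that receives this descriptor (for example over a unix socket) can map it read-only and rely on the contents never changing underneath it, which is the sharing model the SEALING comment above describes.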
// SPDX-License-Identifier: GPL-2.0
/*
 * mm/mprotect.c
 *
 * (C) Copyright 1994 Linus Torvalds
 * (C) Copyright 2002 Christoph Hellwig
 *
 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
 * (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <linux/pkeys.h>
#include <linux/ksm.h>
#include <linux/uaccess.h>
#include <linux/mm_inline.h>
#include <linux/pgtable.h>
#include <linux/sched/sysctl.h>
#include <linux/userfaultfd_k.h>
#include <linux/memory-tiers.h>
#include <uapi/linux/mman.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

#include "internal.h"

static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte)
{
	if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
		return false;

	/* Don't touch entries that are not even readable. */
	if (pte_protnone(pte))
		return false;

	/* Do we need write faults for softdirty tracking? */
	if (pte_needs_soft_dirty_wp(vma, pte))
		return false;

	/* Do we need write faults for uffd-wp tracking? */
	if (userfaultfd_pte_wp(vma, pte))
		return false;

	return true;
}

static bool can_change_private_pte_writable(struct vm_area_struct *vma,
					    unsigned long addr, pte_t pte)
{
	struct page *page;

	if (!maybe_change_pte_writable(vma, pte))
		return false;

	/*
	 * Writable MAP_PRIVATE mapping: We can only special-case on
	 * exclusive anonymous pages, because we know that our
	 * write-fault handler similarly would map them writable without
	 * any additional checks while holding the PT lock.
	 */
	page = vm_normal_page(vma, addr, pte);
	return page && PageAnon(page) && PageAnonExclusive(page);
}

static bool can_change_shared_pte_writable(struct vm_area_struct *vma,
					   pte_t pte)
{
	if (!maybe_change_pte_writable(vma, pte))
		return false;

	VM_WARN_ON_ONCE(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte));

	/*
	 * Writable MAP_SHARED mapping: "clean" might indicate that the FS still
	 * needs a real write-fault for writenotify
	 * (see vma_wants_writenotify()). If "dirty", the assumption is that the
	 * FS was already notified and we can simply mark the PTE writable
	 * just like the write-fault handler would do.
*/ return pte_dirty(pte); } bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr, pte_t pte) { if (!(vma->vm_flags & VM_SHARED)) return can_change_private_pte_writable(vma, addr, pte); return can_change_shared_pte_writable(vma, pte); } static int mprotect_folio_pte_batch(struct folio *folio, pte_t *ptep, pte_t pte, int max_nr_ptes, fpb_t flags) { /* No underlying folio, so cannot batch */ if (!folio) return 1; if (!folio_test_large(folio)) return 1; return folio_pte_batch_flags(folio, NULL, ptep, &pte, max_nr_ptes, flags); } static bool prot_numa_skip(struct vm_area_struct *vma, unsigned long addr, pte_t oldpte, pte_t *pte, int target_node, struct folio *folio) { bool ret = true; bool toptier; int nid; /* Avoid TLB flush if possible */ if (pte_protnone(oldpte)) goto skip; if (!folio) goto skip; if (folio_is_zone_device(folio) || folio_test_ksm(folio)) goto skip; /* Also skip shared copy-on-write pages */ if (is_cow_mapping(vma->vm_flags) && (folio_maybe_dma_pinned(folio) || folio_maybe_mapped_shared(folio))) goto skip; /* * While migration can move some dirty pages, * it cannot move them all from MIGRATE_ASYNC * context. */ if (folio_is_file_lru(folio) && folio_test_dirty(folio)) goto skip; /* * Don't mess with PTEs if page is already on the node * a single-threaded process is running on. */ nid = folio_nid(folio); if (target_node == nid) goto skip; toptier = node_is_toptier(nid); /* * Skip scanning top tier node if normal numa * balancing is disabled */ if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_NORMAL) && toptier) goto skip; ret = false; if (folio_use_access_time(folio)) folio_xchg_access_time(folio, jiffies_to_msecs(jiffies)); skip: return ret; } /* Set nr_ptes number of ptes, starting from idx */ static void prot_commit_flush_ptes(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes, int idx, bool set_write, struct mmu_gather *tlb) { /* * Advance the position in the batch by idx; note that if idx > 0, * then the nr_ptes passed here is <= batch size - idx. */ addr += idx * PAGE_SIZE; ptep += idx; oldpte = pte_advance_pfn(oldpte, idx); ptent = pte_advance_pfn(ptent, idx); if (set_write) ptent = pte_mkwrite(ptent, vma); modify_prot_commit_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes); if (pte_needs_flush(oldpte, ptent)) tlb_flush_pte_range(tlb, addr, nr_ptes * PAGE_SIZE); } /* * Get max length of consecutive ptes pointing to PageAnonExclusive() pages or * !PageAnonExclusive() pages, starting from start_idx. Caller must enforce * that the ptes point to consecutive pages of the same anon large folio. */ static int page_anon_exclusive_sub_batch(int start_idx, int max_len, struct page *first_page, bool expected_anon_exclusive) { int idx; for (idx = start_idx + 1; idx < start_idx + max_len; ++idx) { if (expected_anon_exclusive != PageAnonExclusive(first_page + idx)) break; } return idx - start_idx; } /* * This function is a result of trying our very best to retain the * "avoid the write-fault handler" optimization. In can_change_pte_writable(), * if the vma is a private vma, and we cannot determine whether to change * the pte to writable just from the vma and the pte, we then need to look * at the actual page pointed to by the pte. 
Unfortunately, if we have a * batch of ptes pointing to consecutive pages of the same anon large folio, * the anon-exclusivity (or the negation) of the first page does not guarantee * the anon-exclusivity (or the negation) of the other pages corresponding to * the pte batch; hence in this case it is incorrect to decide to change or * not change the ptes to writable just by using information from the first * pte of the batch. Therefore, we must individually check all pages and * retrieve sub-batches. */ static void commit_anon_folio_batch(struct vm_area_struct *vma, struct folio *folio, struct page *first_page, unsigned long addr, pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb) { bool expected_anon_exclusive; int sub_batch_idx = 0; int len; while (nr_ptes) { expected_anon_exclusive = PageAnonExclusive(first_page + sub_batch_idx); len = page_anon_exclusive_sub_batch(sub_batch_idx, nr_ptes, first_page, expected_anon_exclusive); prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, len, sub_batch_idx, expected_anon_exclusive, tlb); sub_batch_idx += len; nr_ptes -= len; } } static void set_write_prot_commit_flush_ptes(struct vm_area_struct *vma, struct folio *folio, struct page *page, unsigned long addr, pte_t *ptep, pte_t oldpte, pte_t ptent, int nr_ptes, struct mmu_gather *tlb) { bool set_write; if (vma->vm_flags & VM_SHARED) { set_write = can_change_shared_pte_writable(vma, ptent); prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes, /* idx = */ 0, set_write, tlb); return; } set_write = maybe_change_pte_writable(vma, ptent) && (folio && folio_test_anon(folio)); if (!set_write) { prot_commit_flush_ptes(vma, addr, ptep, oldpte, ptent, nr_ptes, /* idx = */ 0, set_write, tlb); return; } commit_anon_folio_batch(vma, folio, page, addr, ptep, oldpte, ptent, nr_ptes, tlb); } static long change_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) { pte_t *pte, oldpte; spinlock_t *ptl; long pages = 0; int target_node = NUMA_NO_NODE; bool prot_numa = cp_flags & MM_CP_PROT_NUMA; bool uffd_wp = cp_flags & MM_CP_UFFD_WP; bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; int nr_ptes; tlb_change_page_size(tlb, PAGE_SIZE); pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); if (!pte) return -EAGAIN; /* Get target node for single threaded private VMAs */ if (prot_numa && !(vma->vm_flags & VM_SHARED) && atomic_read(&vma->vm_mm->mm_users) == 1) target_node = numa_node_id(); flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); do { nr_ptes = 1; oldpte = ptep_get(pte); if (pte_present(oldpte)) { const fpb_t flags = FPB_RESPECT_SOFT_DIRTY | FPB_RESPECT_WRITE; int max_nr_ptes = (end - addr) >> PAGE_SHIFT; struct folio *folio = NULL; struct page *page; pte_t ptent; page = vm_normal_page(vma, addr, oldpte); if (page) folio = page_folio(page); /* * Avoid trapping faults against the zero or KSM * pages. See similar comment in change_huge_pmd. 
*/ if (prot_numa) { int ret = prot_numa_skip(vma, addr, oldpte, pte, target_node, folio); if (ret) { /* determine batch to skip */ nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, /* flags = */ 0); continue; } } nr_ptes = mprotect_folio_pte_batch(folio, pte, oldpte, max_nr_ptes, flags); oldpte = modify_prot_start_ptes(vma, addr, pte, nr_ptes); ptent = pte_modify(oldpte, newprot); if (uffd_wp) ptent = pte_mkuffd_wp(ptent); else if (uffd_wp_resolve) ptent = pte_clear_uffd_wp(ptent); /* * In some writable, shared mappings, we might want * to catch actual write access -- see * vma_wants_writenotify(). * * In all writable, private mappings, we have to * properly handle COW. * * In both cases, we can sometimes still change PTEs * writable and avoid the write-fault handler, for * example, if a PTE is already dirty and no other * COW or special handling is required. */ if ((cp_flags & MM_CP_TRY_CHANGE_WRITABLE) && !pte_write(ptent)) set_write_prot_commit_flush_ptes(vma, folio, page, addr, pte, oldpte, ptent, nr_ptes, tlb); else prot_commit_flush_ptes(vma, addr, pte, oldpte, ptent, nr_ptes, /* idx = */ 0, /* set_write = */ false, tlb); pages += nr_ptes; } else if (is_swap_pte(oldpte)) { swp_entry_t entry = pte_to_swp_entry(oldpte); pte_t newpte; if (is_writable_migration_entry(entry)) { struct folio *folio = pfn_swap_entry_folio(entry); /* * A protection check is difficult so * just be safe and disable write */ if (folio_test_anon(folio)) entry = make_readable_exclusive_migration_entry( swp_offset(entry)); else entry = make_readable_migration_entry(swp_offset(entry)); newpte = swp_entry_to_pte(entry); if (pte_swp_soft_dirty(oldpte)) newpte = pte_swp_mksoft_dirty(newpte); } else if (is_writable_device_private_entry(entry)) { /* * We do not preserve soft-dirtiness. See * copy_nonpresent_pte() for explanation. */ entry = make_readable_device_private_entry( swp_offset(entry)); newpte = swp_entry_to_pte(entry); if (pte_swp_uffd_wp(oldpte)) newpte = pte_swp_mkuffd_wp(newpte); } else if (is_pte_marker_entry(entry)) { /* * Ignore error swap entries unconditionally, * because any access should sigbus/sigsegv * anyway. */ if (is_poisoned_swp_entry(entry) || is_guard_swp_entry(entry)) continue; /* * If this is uffd-wp pte marker and we'd like * to unprotect it, drop it; the next page * fault will trigger without uffd trapping. */ if (uffd_wp_resolve) { pte_clear(vma->vm_mm, addr, pte); pages++; } continue; } else { newpte = oldpte; } if (uffd_wp) newpte = pte_swp_mkuffd_wp(newpte); else if (uffd_wp_resolve) newpte = pte_swp_clear_uffd_wp(newpte); if (!pte_same(oldpte, newpte)) { set_pte_at(vma->vm_mm, addr, pte, newpte); pages++; } } else { /* It must be an none page, or what else?.. */ WARN_ON_ONCE(!pte_none(oldpte)); /* * Nobody plays with any none ptes besides * userfaultfd when applying the protections. */ if (likely(!uffd_wp)) continue; if (userfaultfd_wp_use_markers(vma)) { /* * For file-backed mem, we need to be able to * wr-protect a none pte, because even if the * pte is none, the page/swap cache could * exist. Doing that by install a marker. */ set_pte_at(vma->vm_mm, addr, pte, make_pte_marker(PTE_MARKER_UFFD_WP)); pages++; } } } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); pte_unmap_unlock(pte - 1, ptl); return pages; } /* * Return true if we want to split THPs into PTE mappings in change * protection procedure, false otherwise. 
*/ static inline bool pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags) { /* * pte markers only resides in pte level, if we need pte markers, * we need to split. For example, we cannot wr-protect a file thp * (e.g. 2M shmem) because file thp is handled differently when * split by erasing the pmd so far. */ return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma); } /* * Return true if we want to populate pgtables in change protection * procedure, false otherwise */ static inline bool pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags) { /* If not within ioctl(UFFDIO_WRITEPROTECT), then don't bother */ if (!(cp_flags & MM_CP_UFFD_WP)) return false; /* Populate if the userfaultfd mode requires pte markers */ return userfaultfd_wp_use_markers(vma); } /* * Populate the pgtable underneath for whatever reason if requested. * When {pte|pmd|...}_alloc() failed we treat it the same way as pgtable * allocation failures during page faults by kicking OOM and returning * error. */ #define change_pmd_prepare(vma, pmd, cp_flags) \ ({ \ long err = 0; \ if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \ if (pte_alloc(vma->vm_mm, pmd)) \ err = -ENOMEM; \ } \ err; \ }) /* * This is the general pud/p4d/pgd version of change_pmd_prepare(). We need to * have separate change_pmd_prepare() because pte_alloc() returns 0 on success, * while {pmd|pud|p4d}_alloc() returns the valid pointer on success. */ #define change_prepare(vma, high, low, addr, cp_flags) \ ({ \ long err = 0; \ if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \ low##_t *p = low##_alloc(vma->vm_mm, high, addr); \ if (p == NULL) \ err = -ENOMEM; \ } \ err; \ }) static inline long change_pmd_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) { pmd_t *pmd; unsigned long next; long pages = 0; unsigned long nr_huge_updates = 0; pmd = pmd_offset(pud, addr); do { long ret; pmd_t _pmd; again: next = pmd_addr_end(addr, end); ret = change_pmd_prepare(vma, pmd, cp_flags); if (ret) { pages = ret; break; } if (pmd_none(*pmd)) goto next; _pmd = pmdp_get_lockless(pmd); if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd)) { if ((next - addr != HPAGE_PMD_SIZE) || pgtable_split_needed(vma, cp_flags)) { __split_huge_pmd(vma, pmd, addr, false); /* * For file-backed, the pmd could have been * cleared; make sure pmd populated if * necessary, then fall-through to pte level. 
*/ ret = change_pmd_prepare(vma, pmd, cp_flags); if (ret) { pages = ret; break; } } else { ret = change_huge_pmd(tlb, vma, pmd, addr, newprot, cp_flags); if (ret) { if (ret == HPAGE_PMD_NR) { pages += HPAGE_PMD_NR; nr_huge_updates++; } /* huge pmd was handled */ goto next; } } /* fall through, the trans huge pmd just split */ } ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, cp_flags); if (ret < 0) goto again; pages += ret; next: cond_resched(); } while (pmd++, addr = next, addr != end); if (nr_huge_updates) count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates); return pages; } static inline long change_pud_range(struct mmu_gather *tlb, struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) { struct mmu_notifier_range range; pud_t *pudp, pud; unsigned long next; long pages = 0, ret; range.start = 0; pudp = pud_offset(p4d, addr); do { again: next = pud_addr_end(addr, end); ret = change_prepare(vma, pudp, pmd, addr, cp_flags); if (ret) { pages = ret; break; } pud = READ_ONCE(*pudp); if (pud_none(pud)) continue; if (!range.start) { mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA, 0, vma->vm_mm, addr, end); mmu_notifier_invalidate_range_start(&range); } if (pud_leaf(pud)) { if ((next - addr != PUD_SIZE) || pgtable_split_needed(vma, cp_flags)) { __split_huge_pud(vma, pudp, addr); goto again; } else { ret = change_huge_pud(tlb, vma, pudp, addr, newprot, cp_flags); if (ret == 0) goto again; /* huge pud was handled */ if (ret == HPAGE_PUD_NR) pages += HPAGE_PUD_NR; continue; } } pages += change_pmd_range(tlb, vma, pudp, addr, next, newprot, cp_flags); } while (pudp++, addr = next, addr != end); if (range.start) mmu_notifier_invalidate_range_end(&range); return pages; } static inline long change_p4d_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) { p4d_t *p4d; unsigned long next; long pages = 0, ret; p4d = p4d_offset(pgd, addr); do { next = p4d_addr_end(addr, end); ret = change_prepare(vma, p4d, pud, addr, cp_flags); if (ret) return ret; if (p4d_none_or_clear_bad(p4d)) continue; pages += change_pud_range(tlb, vma, p4d, addr, next, newprot, cp_flags); } while (p4d++, addr = next, addr != end); return pages; } static long change_protection_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, pgprot_t newprot, unsigned long cp_flags) { struct mm_struct *mm = vma->vm_mm; pgd_t *pgd; unsigned long next; long pages = 0, ret; BUG_ON(addr >= end); pgd = pgd_offset(mm, addr); tlb_start_vma(tlb, vma); do { next = pgd_addr_end(addr, end); ret = change_prepare(vma, pgd, p4d, addr, cp_flags); if (ret) { pages = ret; break; } if (pgd_none_or_clear_bad(pgd)) continue; pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot, cp_flags); } while (pgd++, addr = next, addr != end); tlb_end_vma(tlb, vma); return pages; } long change_protection(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long cp_flags) { pgprot_t newprot = vma->vm_page_prot; long pages; BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL); #ifdef CONFIG_NUMA_BALANCING /* * Ordinary protection updates (mprotect, uffd-wp, softdirty tracking) * are expected to reflect their requirements via VMA flags such that * vma_set_page_prot() will adjust vma->vm_page_prot accordingly. 
*/ if (cp_flags & MM_CP_PROT_NUMA) newprot = PAGE_NONE; #else WARN_ON_ONCE(cp_flags & MM_CP_PROT_NUMA); #endif if (is_vm_hugetlb_page(vma)) pages = hugetlb_change_protection(vma, start, end, newprot, cp_flags); else pages = change_protection_range(tlb, vma, start, end, newprot, cp_flags); return pages; } static int prot_none_pte_entry(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { return pfn_modify_allowed(pte_pfn(ptep_get(pte)), *(pgprot_t *)(walk->private)) ? 0 : -EACCES; } static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long next, struct mm_walk *walk) { return pfn_modify_allowed(pte_pfn(ptep_get(pte)), *(pgprot_t *)(walk->private)) ? 0 : -EACCES; } static int prot_none_test(unsigned long addr, unsigned long next, struct mm_walk *walk) { return 0; } static const struct mm_walk_ops prot_none_walk_ops = { .pte_entry = prot_none_pte_entry, .hugetlb_entry = prot_none_hugetlb_entry, .test_walk = prot_none_test, .walk_lock = PGWALK_WRLOCK, }; int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, vm_flags_t newflags) { struct mm_struct *mm = vma->vm_mm; vm_flags_t oldflags = READ_ONCE(vma->vm_flags); long nrpages = (end - start) >> PAGE_SHIFT; unsigned int mm_cp_flags = 0; unsigned long charged = 0; int error; if (vma_is_sealed(vma)) return -EPERM; if (newflags == oldflags) { *pprev = vma; return 0; } /* * Do PROT_NONE PFN permission checks here when we can still * bail out without undoing a lot of state. This is a rather * uncommon case, so doesn't need to be very optimized. */ if (arch_has_pfn_modify_check() && (oldflags & (VM_PFNMAP|VM_MIXEDMAP)) && (newflags & VM_ACCESS_FLAGS) == 0) { pgprot_t new_pgprot = vm_get_page_prot(newflags); error = walk_page_range(current->mm, start, end, &prot_none_walk_ops, &new_pgprot); if (error) return error; } /* * If we make a private mapping writable we increase our commit; * but (without finer accounting) cannot reduce our commit if we * make it unwritable again except in the anonymous case where no * anon_vma has yet to be assigned. * * hugetlb mapping were accounted for even if read-only so there is * no need to account for them here. */ if (newflags & VM_WRITE) { /* Check space limits when area turns into data. */ if (!may_expand_vm(mm, newflags, nrpages) && may_expand_vm(mm, oldflags, nrpages)) return -ENOMEM; if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB| VM_SHARED|VM_NORESERVE))) { charged = nrpages; if (security_vm_enough_memory_mm(mm, charged)) return -ENOMEM; newflags |= VM_ACCOUNT; } } else if ((oldflags & VM_ACCOUNT) && vma_is_anonymous(vma) && !vma->anon_vma) { newflags &= ~VM_ACCOUNT; } vma = vma_modify_flags(vmi, *pprev, vma, start, end, newflags); if (IS_ERR(vma)) { error = PTR_ERR(vma); goto fail; } *pprev = vma; /* * vm_flags and vm_page_prot are protected by the mmap_lock * held in write mode. */ vma_start_write(vma); vm_flags_reset_once(vma, newflags); if (vma_wants_manual_pte_write_upgrade(vma)) mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE; vma_set_page_prot(vma); change_protection(tlb, vma, start, end, mm_cp_flags); if ((oldflags & VM_ACCOUNT) && !(newflags & VM_ACCOUNT)) vm_unacct_memory(nrpages); /* * Private VM_LOCKED VMA becoming writable: trigger COW to avoid major * fault on access. 
*/ if ((oldflags & (VM_WRITE | VM_SHARED | VM_LOCKED)) == VM_LOCKED && (newflags & VM_WRITE)) { populate_vma_page_range(vma, start, end, NULL); } vm_stat_account(mm, oldflags, -nrpages); vm_stat_account(mm, newflags, nrpages); perf_event_mmap(vma); return 0; fail: vm_unacct_memory(charged); return error; } /* * pkey==-1 when doing a legacy mprotect() */ static int do_mprotect_pkey(unsigned long start, size_t len, unsigned long prot, int pkey) { unsigned long nstart, end, tmp, reqprot; struct vm_area_struct *vma, *prev; int error; const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP); const bool rier = (current->personality & READ_IMPLIES_EXEC) && (prot & PROT_READ); struct mmu_gather tlb; struct vma_iterator vmi; start = untagged_addr(start); prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP); if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */ return -EINVAL; if (start & ~PAGE_MASK) return -EINVAL; if (!len) return 0; len = PAGE_ALIGN(len); end = start + len; if (end <= start) return -ENOMEM; if (!arch_validate_prot(prot, start)) return -EINVAL; reqprot = prot; if (mmap_write_lock_killable(current->mm)) return -EINTR; /* * If userspace did not allocate the pkey, do not let * them use it here. */ error = -EINVAL; if ((pkey != -1) && !mm_pkey_is_allocated(current->mm, pkey)) goto out; vma_iter_init(&vmi, current->mm, start); vma = vma_find(&vmi, end); error = -ENOMEM; if (!vma) goto out; if (unlikely(grows & PROT_GROWSDOWN)) { if (vma->vm_start >= end) goto out; start = vma->vm_start; error = -EINVAL; if (!(vma->vm_flags & VM_GROWSDOWN)) goto out; } else { if (vma->vm_start > start) goto out; if (unlikely(grows & PROT_GROWSUP)) { end = vma->vm_end; error = -EINVAL; if (!(vma->vm_flags & VM_GROWSUP)) goto out; } } prev = vma_prev(&vmi); if (start > vma->vm_start) prev = vma; tlb_gather_mmu(&tlb, current->mm); nstart = start; tmp = vma->vm_start; for_each_vma_range(vmi, vma, end) { vm_flags_t mask_off_old_flags; vm_flags_t newflags; int new_vma_pkey; if (vma->vm_start != tmp) { error = -ENOMEM; break; } /* Does the application expect PROT_READ to imply PROT_EXEC */ if (rier && (vma->vm_flags & VM_MAYEXEC)) prot |= PROT_EXEC; /* * Each mprotect() call explicitly passes r/w/x permissions. * If a permission is not passed to mprotect(), it must be * cleared from the VMA. 
*/ mask_off_old_flags = VM_ACCESS_FLAGS | VM_FLAGS_CLEAR; new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey); newflags = calc_vm_prot_bits(prot, new_vma_pkey); newflags |= (vma->vm_flags & ~mask_off_old_flags); /* newflags >> 4 shift VM_MAY% in place of VM_% */ if ((newflags & ~(newflags >> 4)) & VM_ACCESS_FLAGS) { error = -EACCES; break; } if (map_deny_write_exec(vma->vm_flags, newflags)) { error = -EACCES; break; } /* Allow architectures to sanity-check the new flags */ if (!arch_validate_flags(newflags)) { error = -EINVAL; break; } error = security_file_mprotect(vma, reqprot, prot); if (error) break; tmp = vma->vm_end; if (tmp > end) tmp = end; if (vma->vm_ops && vma->vm_ops->mprotect) { error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags); if (error) break; } error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags); if (error) break; tmp = vma_iter_end(&vmi); nstart = tmp; prot = reqprot; } tlb_finish_mmu(&tlb); if (!error && tmp < end) error = -ENOMEM; out: mmap_write_unlock(current->mm); return error; } SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len, unsigned long, prot) { return do_mprotect_pkey(start, len, prot, -1); } #ifdef CONFIG_ARCH_HAS_PKEYS SYSCALL_DEFINE4(pkey_mprotect, unsigned long, start, size_t, len, unsigned long, prot, int, pkey) { return do_mprotect_pkey(start, len, prot, pkey); } SYSCALL_DEFINE2(pkey_alloc, unsigned long, flags, unsigned long, init_val) { int pkey; int ret; /* No flags supported yet. */ if (flags) return -EINVAL; /* check for unsupported init values */ if (init_val & ~PKEY_ACCESS_MASK) return -EINVAL; mmap_write_lock(current->mm); pkey = mm_pkey_alloc(current->mm); ret = -ENOSPC; if (pkey == -1) goto out; ret = arch_set_user_pkey_access(current, pkey, init_val); if (ret) { mm_pkey_free(current->mm, pkey); goto out; } ret = pkey; out: mmap_write_unlock(current->mm); return ret; } SYSCALL_DEFINE1(pkey_free, int, pkey) { int ret; mmap_write_lock(current->mm); ret = mm_pkey_free(current->mm, pkey); mmap_write_unlock(current->mm); /* * We could provide warnings or errors if any VMA still * has the pkey set here. */ return ret; } #endif /* CONFIG_ARCH_HAS_PKEYS */ |
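do_mprotect_pkey() above is what both mprotect() and pkey_mprotect() resolve to. Below is a minimal userspace sketch of the common case (illustrative only): it maps an anonymous page read-write, then uses mprotect() to drop write permission, after which stores to the page would fault while loads keep working.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "writable for now");

	/* Downgrade to read-only: a later store through p would SIGSEGV. */
	if (mprotect(p, len, PROT_READ) < 0) {
		perror("mprotect");
		return 1;
	}

	printf("%s\n", p);	/* reads still succeed */
	munmap(p, len);
	return 0;
}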
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Bluetooth virtual HCI driver
 *
 * Copyright (C) 2000-2001 Qualcomm Incorporated
 * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
 * Copyright (C) 2004-2006 Marcel Holtmann <marcel@holtmann.org>
 */

#include <linux/module.h>
#include <linux/unaligned.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define VERSION "1.5"

static bool amp;

struct vhci_data {
	struct hci_dev *hdev;

	wait_queue_head_t read_wait;
	struct sk_buff_head readq;
struct mutex open_mutex; struct delayed_work open_timeout; struct work_struct suspend_work; bool suspended; bool wakeup; __u16 msft_opcode; bool aosp_capable; atomic_t initialized; }; static int vhci_open_dev(struct hci_dev *hdev) { return 0; } static int vhci_close_dev(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_flush(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct vhci_data *data = hci_get_drvdata(hdev); memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&data->readq, skb); if (atomic_read(&data->initialized)) wake_up_interruptible(&data->read_wait); return 0; } static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id) { *data_path_id = 0; return 0; } static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type, struct bt_codec *codec, __u8 *vnd_len, __u8 **vnd_data) { if (type != ESCO_LINK) return -EINVAL; *vnd_len = 0; *vnd_data = NULL; return 0; } static bool vhci_wakeup(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); return data->wakeup; } static ssize_t force_suspend_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; char buf[3]; buf[0] = data->suspended ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static void vhci_suspend_work(struct work_struct *work) { struct vhci_data *data = container_of(work, struct vhci_data, suspend_work); if (data->suspended) hci_suspend_dev(data->hdev); else hci_resume_dev(data->hdev); } static ssize_t force_suspend_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (data->suspended == enable) return -EALREADY; data->suspended = enable; schedule_work(&data->suspend_work); return count; } static const struct file_operations force_suspend_fops = { .open = simple_open, .read = force_suspend_read, .write = force_suspend_write, .llseek = default_llseek, }; static ssize_t force_wakeup_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; char buf[3]; buf[0] = data->wakeup ? 
'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t force_wakeup_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (data->wakeup == enable) return -EALREADY; data->wakeup = enable; return count; } static const struct file_operations force_wakeup_fops = { .open = simple_open, .read = force_wakeup_read, .write = force_wakeup_write, .llseek = default_llseek, }; static int msft_opcode_set(void *data, u64 val) { struct vhci_data *vhci = data; if (val > 0xffff || hci_opcode_ogf(val) != 0x3f) return -EINVAL; if (vhci->msft_opcode) return -EALREADY; vhci->msft_opcode = val; return 0; } static int msft_opcode_get(void *data, u64 *val) { struct vhci_data *vhci = data; *val = vhci->msft_opcode; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set, "%llu\n"); static ssize_t aosp_capable_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *vhci = file->private_data; char buf[3]; buf[0] = vhci->aosp_capable ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t aosp_capable_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *vhci = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (!enable) return -EINVAL; if (vhci->aosp_capable) return -EALREADY; vhci->aosp_capable = enable; return count; } static const struct file_operations aosp_capable_fops = { .open = simple_open, .read = aosp_capable_read, .write = aosp_capable_write, .llseek = default_llseek, }; static int vhci_setup(struct hci_dev *hdev) { struct vhci_data *vhci = hci_get_drvdata(hdev); if (vhci->msft_opcode) hci_set_msft_opcode(hdev, vhci->msft_opcode); if (vhci->aosp_capable) hci_set_aosp_capable(hdev); return 0; } static void vhci_coredump(struct hci_dev *hdev) { /* No need to do anything */ } static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) { const char *buf; buf = "Controller Name: vhci_ctrl\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Firmware Version: vhci_fw\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Driver: vhci_drv\n"; skb_put_data(skb, buf, strlen(buf)); buf = "Vendor: vhci\n"; skb_put_data(skb, buf, strlen(buf)); } #define MAX_COREDUMP_LINE_LEN 40 struct devcoredump_test_data { enum devcoredump_state state; unsigned int timeout; char data[MAX_COREDUMP_LINE_LEN]; }; static inline void force_devcd_timeout(struct hci_dev *hdev, unsigned int timeout) { #ifdef CONFIG_DEV_COREDUMP hdev->dump.timeout = secs_to_jiffies(timeout); #endif } static ssize_t force_devcd_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; struct hci_dev *hdev = data->hdev; struct sk_buff *skb = NULL; struct devcoredump_test_data dump_data; size_t data_size; int ret; if (count < offsetof(struct devcoredump_test_data, data) || count > sizeof(dump_data)) return -EINVAL; if (copy_from_user(&dump_data, user_buf, count)) return -EFAULT; data_size = count - offsetof(struct devcoredump_test_data, data); skb = alloc_skb(data_size, GFP_ATOMIC); if (!skb) return -ENOMEM; skb_put_data(skb, &dump_data.data, data_size); hci_devcd_register(hdev, vhci_coredump, 
vhci_coredump_hdr, NULL); /* Force the devcoredump timeout */ if (dump_data.timeout) force_devcd_timeout(hdev, dump_data.timeout); ret = hci_devcd_init(hdev, skb->len); if (ret) { BT_ERR("Failed to generate devcoredump"); kfree_skb(skb); return ret; } hci_devcd_append(hdev, skb); switch (dump_data.state) { case HCI_DEVCOREDUMP_DONE: hci_devcd_complete(hdev); break; case HCI_DEVCOREDUMP_ABORT: hci_devcd_abort(hdev); break; case HCI_DEVCOREDUMP_TIMEOUT: /* Do nothing */ break; default: return -EINVAL; } return count; } static const struct file_operations force_devcoredump_fops = { .open = simple_open, .write = force_devcd_write, }; static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; struct sk_buff *skb; if (data->hdev) return -EBADFD; /* bits 2-5 are reserved (must be zero) */ if (opcode & 0x3c) return -EINVAL; skb = bt_skb_alloc(4, GFP_KERNEL); if (!skb) return -ENOMEM; hdev = hci_alloc_dev(); if (!hdev) { kfree_skb(skb); return -ENOMEM; } data->hdev = hdev; hdev->bus = HCI_VIRTUAL; hci_set_drvdata(hdev, data); hdev->open = vhci_open_dev; hdev->close = vhci_close_dev; hdev->flush = vhci_flush; hdev->send = vhci_send_frame; hdev->get_data_path_id = vhci_get_data_path_id; hdev->get_codec_config_data = vhci_get_codec_config_data; hdev->wakeup = vhci_wakeup; hdev->setup = vhci_setup; hci_set_quirk(hdev, HCI_QUIRK_NON_PERSISTENT_SETUP); hci_set_quirk(hdev, HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED); /* bit 6 is for external configuration */ if (opcode & 0x40) hci_set_quirk(hdev, HCI_QUIRK_EXTERNAL_CONFIG); /* bit 7 is for raw device */ if (opcode & 0x80) hci_set_quirk(hdev, HCI_QUIRK_RAW_DEVICE); if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hci_free_dev(hdev); data->hdev = NULL; kfree_skb(skb); return -EBUSY; } debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, &force_suspend_fops); debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, &force_wakeup_fops); if (IS_ENABLED(CONFIG_BT_MSFTEXT)) debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data, &msft_opcode_fops); if (IS_ENABLED(CONFIG_BT_AOSPEXT)) debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data, &aosp_capable_fops); debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data, &force_devcoredump_fops); hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; skb_put_u8(skb, 0xff); skb_put_u8(skb, opcode); put_unaligned_le16(hdev->id, skb_put(skb, 2)); skb_queue_head(&data->readq, skb); atomic_inc(&data->initialized); wake_up_interruptible(&data->read_wait); return 0; } static int vhci_create_device(struct vhci_data *data, __u8 opcode) { int err; mutex_lock(&data->open_mutex); err = __vhci_create_device(data, opcode); mutex_unlock(&data->open_mutex); return err; } static inline ssize_t vhci_get_user(struct vhci_data *data, struct iov_iter *from) { size_t len = iov_iter_count(from); struct sk_buff *skb; __u8 pkt_type, opcode; int ret; if (len < 2 || len > HCI_MAX_FRAME_SIZE) return -EINVAL; skb = bt_skb_alloc(len, GFP_KERNEL); if (!skb) return -ENOMEM; if (!copy_from_iter_full(skb_put(skb, len), len, from)) { kfree_skb(skb); return -EFAULT; } pkt_type = *((__u8 *) skb->data); skb_pull(skb, 1); switch (pkt_type) { case HCI_EVENT_PKT: case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: if (!data->hdev) { kfree_skb(skb); return -ENODEV; } hci_skb_pkt_type(skb) = pkt_type; ret = hci_recv_frame(data->hdev, skb); break; case HCI_VENDOR_PKT: cancel_delayed_work_sync(&data->open_timeout); opcode = *((__u8 *) skb->data); skb_pull(skb, 1); if (skb->len > 
0) { kfree_skb(skb); return -EINVAL; } kfree_skb(skb); ret = vhci_create_device(data, opcode); break; default: kfree_skb(skb); return -EINVAL; } return (ret < 0) ? ret : len; } static inline ssize_t vhci_put_user(struct vhci_data *data, struct sk_buff *skb, char __user *buf, int count) { char __user *ptr = buf; int len; len = min_t(unsigned int, skb->len, count); if (copy_to_user(ptr, skb->data, len)) return -EFAULT; if (!data->hdev) return len; data->hdev->stat.byte_tx += len; switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: data->hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: data->hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: data->hdev->stat.sco_tx++; break; } return len; } static ssize_t vhci_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct vhci_data *data = file->private_data; struct sk_buff *skb; ssize_t ret = 0; while (count) { skb = skb_dequeue(&data->readq); if (skb) { ret = vhci_put_user(data, skb, buf, count); if (ret < 0) skb_queue_head(&data->readq, skb); else kfree_skb(skb); break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } ret = wait_event_interruptible(data->read_wait, !skb_queue_empty(&data->readq)); if (ret < 0) break; } return ret; } static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct vhci_data *data = file->private_data; return vhci_get_user(data, from); } static __poll_t vhci_poll(struct file *file, poll_table *wait) { struct vhci_data *data = file->private_data; poll_wait(file, &data->read_wait, wait); if (!skb_queue_empty(&data->readq)) return EPOLLIN | EPOLLRDNORM; return EPOLLOUT | EPOLLWRNORM; } static void vhci_open_timeout(struct work_struct *work) { struct vhci_data *data = container_of(work, struct vhci_data, open_timeout.work); vhci_create_device(data, 0x00); } static int vhci_open(struct inode *inode, struct file *file) { struct vhci_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; skb_queue_head_init(&data->readq); init_waitqueue_head(&data->read_wait); mutex_init(&data->open_mutex); INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout); INIT_WORK(&data->suspend_work, vhci_suspend_work); file->private_data = data; nonseekable_open(inode, file); schedule_delayed_work(&data->open_timeout, secs_to_jiffies(1)); return 0; } static int vhci_release(struct inode *inode, struct file *file) { struct vhci_data *data = file->private_data; struct hci_dev *hdev; cancel_delayed_work_sync(&data->open_timeout); flush_work(&data->suspend_work); hdev = data->hdev; if (hdev) { hci_unregister_dev(hdev); hci_free_dev(hdev); } skb_queue_purge(&data->readq); file->private_data = NULL; kfree(data); return 0; } static const struct file_operations vhci_fops = { .owner = THIS_MODULE, .read = vhci_read, .write_iter = vhci_write, .poll = vhci_poll, .open = vhci_open, .release = vhci_release, }; static struct miscdevice vhci_miscdev = { .name = "vhci", .fops = &vhci_fops, .minor = VHCI_MINOR, }; module_misc_device(vhci_miscdev); module_param(amp, bool, 0644); MODULE_PARM_DESC(amp, "Create AMP controller device"); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("devname:vhci"); MODULE_ALIAS_MISCDEV(VHCI_MINOR); |
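A minimal user-space sketch of the create-device handshake handled above, under these assumptions: the miscdevice is exposed as /dev/vhci (per the devname alias), HCI_VENDOR_PKT carries the value 0xff as in the Bluetooth core headers, and opcode 0x00 requests a plain controller (no raw/external-config bits). It writes the two-byte vendor packet parsed by vhci_get_user() and reads back the four-byte reply queued by __vhci_create_device(): 0xff, the opcode, then the hci index as a little-endian 16-bit value.

/* Hypothetical user-space sketch (not part of the driver): create a virtual
 * HCI controller through /dev/vhci and report its index.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t req[2] = { 0xff, 0x00 };	/* HCI_VENDOR_PKT, opcode 0x00 */
	uint8_t rsp[4];
	int fd;

	fd = open("/dev/vhci", O_RDWR);
	if (fd < 0) {
		perror("open /dev/vhci");
		return 1;
	}

	/* Any payload beyond the opcode byte makes vhci_get_user() return -EINVAL. */
	if (write(fd, req, sizeof(req)) != sizeof(req)) {
		perror("write");
		return 1;
	}

	/* __vhci_create_device() queued 0xff, opcode, hdev->id (le16). */
	if (read(fd, rsp, sizeof(rsp)) == sizeof(rsp) && rsp[0] == 0xff) {
		unsigned int index = rsp[2] | (rsp[3] << 8);

		printf("created hci%u\n", index);
	}

	/* The controller exists for as long as the fd stays open;
	 * vhci_release() unregisters it on close.
	 */
	pause();
	close(fd);
	return 0;
}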
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (c) 1999 Andreas Gal <gal@cs.uni-magdeburg.de>
 * (c) 2000-2001 Vojtech Pavlik <vojtech@ucw.cz>
 * (c) 2007-2009 Jiri Kosina
 *
 * HID debugging support
 */

/*
 *
 * Should you need to contact me, the author, you can do so either by
 * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail:
 * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/kfifo.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/hid.h>
#include <linux/hid-debug.h>

static struct dentry *hid_debug_root; struct hid_usage_entry { unsigned page; unsigned usage; const char *description; }; static const struct hid_usage_entry hid_usage_table[] = { { 0x00, 0, "Undefined" }, { 0x01, 0, "GenericDesktop" }, { 0x01, 0x0001, "Pointer" }, { 0x01, 0x0002, "Mouse" }, { 0x01, 0x0004, "Joystick" }, { 0x01, 0x0005, "Gamepad" }, { 0x01, 0x0006, "Keyboard" }, { 0x01, 0x0007, "Keypad" }, { 0x01, 0x0008, "MultiaxisController" }, { 0x01, 0x0009, "TabletPCSystemControls" }, { 0x01, 0x000a, "WaterCoolingDevice" }, { 0x01, 0x000b, "ComputerChassisDevice" }, { 0x01, 0x000c, "WirelessRadioControls" }, { 0x01, 0x000d, "PortableDeviceControl" }, { 0x01, 0x000e, "SystemMultiAxisController" }, { 0x01, 0x000f, "SpatialController" }, { 0x01, 0x0010, "AssistiveControl" }, { 0x01, 0x0011, "DeviceDock" }, { 0x01, 0x0012, "DockableDevice" }, { 0x01, 0x0013, "CallStateManagementControl" }, { 0x01, 0x0030, "X" }, { 0x01, 0x0031, "Y" }, { 0x01, 0x0032, "Z" }, { 0x01, 0x0033, "Rx" }, { 0x01, 0x0034, "Ry" }, { 0x01, 0x0035, "Rz" }, { 0x01, 0x0036, "Slider" }, { 0x01, 0x0037, "Dial" }, { 0x01, 0x0038, "Wheel" }, { 0x01, 0x0039, "HatSwitch" }, { 0x01, 0x003a, "CountedBuffer" }, { 0x01, 0x003b, "ByteCount" }, { 0x01, 0x003c, "MotionWakeup" }, { 0x01, 0x003d, "Start" }, { 0x01, 0x003e, "Select" }, { 0x01, 0x0040, "Vx" }, { 0x01, 0x0041, "Vy" }, { 0x01, 0x0042, "Vz" }, { 0x01, 0x0043, "Vbrx" }, { 0x01, 0x0044, "Vbry" }, { 0x01, 0x0045, "Vbrz" }, { 0x01, 0x0046, "Vno" }, { 0x01, 0x0047, "FeatureNotification" }, { 0x01, 0x0048, "ResolutionMultiplier" }, { 0x01, 0x0049, "Qx" }, { 0x01, 0x004a, "Qy" }, { 0x01, 0x004b, "Qz" }, { 0x01, 0x004c, "Qw" }, { 0x01, 0x0080, "SystemControl" }, { 0x01, 0x0081, "SystemPowerDown" }, { 0x01, 0x0082, "SystemSleep" }, { 0x01, 0x0083, "SystemWakeUp" }, { 0x01, 0x0084, "SystemContextMenu" }, { 0x01, 0x0085, "SystemMainMenu" }, { 0x01, 0x0086, "SystemAppMenu" }, { 0x01, 0x0087, "SystemMenuHelp" }, { 0x01, 0x0088, "SystemMenuExit" }, { 0x01, 0x0089, "SystemMenuSelect" }, { 0x01, 0x008a, "SystemMenuRight" }, { 0x01, 0x008b, "SystemMenuLeft" }, { 0x01, 0x008c, "SystemMenuUp" }, { 0x01, 0x008d, "SystemMenuDown" }, { 0x01, 0x008e, "SystemColdRestart" }, { 0x01, 0x008f, "SystemWarmRestart" }, { 0x01, 0x0090, "DpadUp" }, { 0x01, 0x0091, "DpadDown" }, { 0x01, 0x0092, "DpadRight" }, { 0x01, 0x0093, "DpadLeft" }, { 0x01, 0x0094, "IndexTrigger" }, { 0x01, 0x0095, "PalmTrigger" }, { 0x01, 0x0096, "Thumbstick" }, { 0x01, 0x0097,
"SystemFunctionShift" }, { 0x01, 0x0098, "SystemFunctionShiftLock" }, { 0x01, 0x0099, "SystemFunctionShiftLockIndicator" }, { 0x01, 0x009a, "SystemDismissNotification" }, { 0x01, 0x009b, "SystemDoNotDisturb" }, { 0x01, 0x00a0, "SystemDock" }, { 0x01, 0x00a1, "SystemUndock" }, { 0x01, 0x00a2, "SystemSetup" }, { 0x01, 0x00a3, "SystemBreak" }, { 0x01, 0x00a4, "SystemDebuggerBreak" }, { 0x01, 0x00a5, "ApplicationBreak" }, { 0x01, 0x00a6, "ApplicationDebuggerBreak" }, { 0x01, 0x00a7, "SystemSpeakerMute" }, { 0x01, 0x00a8, "SystemHibernate" }, { 0x01, 0x00a9, "SystemMicrophoneMute" }, { 0x01, 0x00b0, "SystemDisplayInvert" }, { 0x01, 0x00b1, "SystemDisplayInternal" }, { 0x01, 0x00b2, "SystemDisplayExternal" }, { 0x01, 0x00b3, "SystemDisplayBoth" }, { 0x01, 0x00b4, "SystemDisplayDual" }, { 0x01, 0x00b5, "SystemDisplayToggleIntExtMode" }, { 0x01, 0x00b6, "SystemDisplaySwapPrimarySecondary" }, { 0x01, 0x00b7, "SystemDisplayToggleLCDAutoscale" }, { 0x01, 0x00c0, "SensorZone" }, { 0x01, 0x00c1, "RPM" }, { 0x01, 0x00c2, "CoolantLevel" }, { 0x01, 0x00c3, "CoolantCriticalLevel" }, { 0x01, 0x00c4, "CoolantPump" }, { 0x01, 0x00c5, "ChassisEnclosure" }, { 0x01, 0x00c6, "WirelessRadioButton" }, { 0x01, 0x00c7, "WirelessRadioLED" }, { 0x01, 0x00c8, "WirelessRadioSliderSwitch" }, { 0x01, 0x00c9, "SystemDisplayRotationLockButton" }, { 0x01, 0x00ca, "SystemDisplayRotationLockSliderSwitch" }, { 0x01, 0x00cb, "ControlEnable" }, { 0x01, 0x00d0, "DockableDeviceUniqueID" }, { 0x01, 0x00d1, "DockableDeviceVendorID" }, { 0x01, 0x00d2, "DockableDevicePrimaryUsagePage" }, { 0x01, 0x00d3, "DockableDevicePrimaryUsageID" }, { 0x01, 0x00d4, "DockableDeviceDockingState" }, { 0x01, 0x00d5, "DockableDeviceDisplayOcclusion" }, { 0x01, 0x00d6, "DockableDeviceObjectType" }, { 0x01, 0x00e0, "CallActiveLED" }, { 0x01, 0x00e1, "CallMuteToggle" }, { 0x01, 0x00e2, "CallMuteLED" }, { 0x02, 0, "SimulationControls" }, { 0x02, 0x0001, "FlightSimulationDevice" }, { 0x02, 0x0002, "AutomobileSimulationDevice" }, { 0x02, 0x0003, "TankSimulationDevice" }, { 0x02, 0x0004, "SpaceshipSimulationDevice" }, { 0x02, 0x0005, "SubmarineSimulationDevice" }, { 0x02, 0x0006, "SailingSimulationDevice" }, { 0x02, 0x0007, "MotorcycleSimulationDevice" }, { 0x02, 0x0008, "SportsSimulationDevice" }, { 0x02, 0x0009, "AirplaneSimulationDevice" }, { 0x02, 0x000a, "HelicopterSimulationDevice" }, { 0x02, 0x000b, "MagicCarpetSimulationDevice" }, { 0x02, 0x000c, "BicycleSimulationDevice" }, { 0x02, 0x0020, "FlightControlStick" }, { 0x02, 0x0021, "FlightStick" }, { 0x02, 0x0022, "CyclicControl" }, { 0x02, 0x0023, "CyclicTrim" }, { 0x02, 0x0024, "FlightYoke" }, { 0x02, 0x0025, "TrackControl" }, { 0x02, 0x00b0, "Aileron" }, { 0x02, 0x00b1, "AileronTrim" }, { 0x02, 0x00b2, "AntiTorqueControl" }, { 0x02, 0x00b3, "AutopilotEnable" }, { 0x02, 0x00b4, "ChaffRelease" }, { 0x02, 0x00b5, "CollectiveControl" }, { 0x02, 0x00b6, "DiveBrake" }, { 0x02, 0x00b7, "ElectronicCountermeasures" }, { 0x02, 0x00b8, "Elevator" }, { 0x02, 0x00b9, "ElevatorTrim" }, { 0x02, 0x00ba, "Rudder" }, { 0x02, 0x00bb, "Throttle" }, { 0x02, 0x00bc, "FlightCommunications" }, { 0x02, 0x00bd, "FlareRelease" }, { 0x02, 0x00be, "LandingGear" }, { 0x02, 0x00bf, "ToeBrake" }, { 0x02, 0x00c0, "Trigger" }, { 0x02, 0x00c1, "WeaponsArm" }, { 0x02, 0x00c2, "WeaponsSelect" }, { 0x02, 0x00c3, "WingFlaps" }, { 0x02, 0x00c4, "Accelerator" }, { 0x02, 0x00c5, "Brake" }, { 0x02, 0x00c6, "Clutch" }, { 0x02, 0x00c7, "Shifter" }, { 0x02, 0x00c8, "Steering" }, { 0x02, 0x00c9, "TurretDirection" }, { 0x02, 0x00ca, 
"BarrelElevation" }, { 0x02, 0x00cb, "DivePlane" }, { 0x02, 0x00cc, "Ballast" }, { 0x02, 0x00cd, "BicycleCrank" }, { 0x02, 0x00ce, "HandleBars" }, { 0x02, 0x00cf, "FrontBrake" }, { 0x02, 0x00d0, "RearBrake" }, { 0x03, 0, "VRControls" }, { 0x03, 0x0001, "Belt" }, { 0x03, 0x0002, "BodySuit" }, { 0x03, 0x0003, "Flexor" }, { 0x03, 0x0004, "Glove" }, { 0x03, 0x0005, "HeadTracker" }, { 0x03, 0x0006, "HeadMountedDisplay" }, { 0x03, 0x0007, "HandTracker" }, { 0x03, 0x0008, "Oculometer" }, { 0x03, 0x0009, "Vest" }, { 0x03, 0x000a, "AnimatronicDevice" }, { 0x03, 0x0020, "StereoEnable" }, { 0x03, 0x0021, "DisplayEnable" }, { 0x04, 0, "SportControls" }, { 0x04, 0x0001, "BaseballBat" }, { 0x04, 0x0002, "GolfClub" }, { 0x04, 0x0003, "RowingMachine" }, { 0x04, 0x0004, "Treadmill" }, { 0x04, 0x0030, "Oar" }, { 0x04, 0x0031, "Slope" }, { 0x04, 0x0032, "Rate" }, { 0x04, 0x0033, "StickSpeed" }, { 0x04, 0x0034, "StickFaceAngle" }, { 0x04, 0x0035, "StickHeelToe" }, { 0x04, 0x0036, "StickFollowThrough" }, { 0x04, 0x0037, "StickTempo" }, { 0x04, 0x0038, "StickType" }, { 0x04, 0x0039, "StickHeight" }, { 0x04, 0x0050, "Putter" }, { 0x04, 0x0051, "1Iron" }, { 0x04, 0x0052, "2Iron" }, { 0x04, 0x0053, "3Iron" }, { 0x04, 0x0054, "4Iron" }, { 0x04, 0x0055, "5Iron" }, { 0x04, 0x0056, "6Iron" }, { 0x04, 0x0057, "7Iron" }, { 0x04, 0x0058, "8Iron" }, { 0x04, 0x0059, "9Iron" }, { 0x04, 0x005a, "10Iron" }, { 0x04, 0x005b, "11Iron" }, { 0x04, 0x005c, "SandWedge" }, { 0x04, 0x005d, "LoftWedge" }, { 0x04, 0x005e, "PowerWedge" }, { 0x04, 0x005f, "1Wood" }, { 0x04, 0x0060, "3Wood" }, { 0x04, 0x0061, "5Wood" }, { 0x04, 0x0062, "7Wood" }, { 0x04, 0x0063, "9Wood" }, { 0x05, 0, "GameControls" }, { 0x05, 0x0001, "3DGameController" }, { 0x05, 0x0002, "PinballDevice" }, { 0x05, 0x0003, "GunDevice" }, { 0x05, 0x0020, "PointofView" }, { 0x05, 0x0021, "TurnRightLeft" }, { 0x05, 0x0022, "PitchForwardBackward" }, { 0x05, 0x0023, "RollRightLeft" }, { 0x05, 0x0024, "MoveRightLeft" }, { 0x05, 0x0025, "MoveForwardBackward" }, { 0x05, 0x0026, "MoveUpDown" }, { 0x05, 0x0027, "LeanRightLeft" }, { 0x05, 0x0028, "LeanForwardBackward" }, { 0x05, 0x0029, "HeightofPOV" }, { 0x05, 0x002a, "Flipper" }, { 0x05, 0x002b, "SecondaryFlipper" }, { 0x05, 0x002c, "Bump" }, { 0x05, 0x002d, "NewGame" }, { 0x05, 0x002e, "ShootBall" }, { 0x05, 0x002f, "Player" }, { 0x05, 0x0030, "GunBolt" }, { 0x05, 0x0031, "GunClip" }, { 0x05, 0x0032, "GunSelector" }, { 0x05, 0x0033, "GunSingleShot" }, { 0x05, 0x0034, "GunBurst" }, { 0x05, 0x0035, "GunAutomatic" }, { 0x05, 0x0036, "GunSafety" }, { 0x05, 0x0037, "GamepadFireJump" }, { 0x05, 0x0039, "GamepadTrigger" }, { 0x05, 0x003a, "FormfittingGamepad" }, { 0x06, 0, "GenericDeviceControls" }, { 0x06, 0x0001, "BackgroundNonuserControls" }, { 0x06, 0x0020, "BatteryStrength" }, { 0x06, 0x0021, "WirelessChannel" }, { 0x06, 0x0022, "WirelessID" }, { 0x06, 0x0023, "DiscoverWirelessControl" }, { 0x06, 0x0024, "SecurityCodeCharacterEntered" }, { 0x06, 0x0025, "SecurityCodeCharacterErased" }, { 0x06, 0x0026, "SecurityCodeCleared" }, { 0x06, 0x0027, "SequenceID" }, { 0x06, 0x0028, "SequenceIDReset" }, { 0x06, 0x0029, "RFSignalStrength" }, { 0x06, 0x002a, "SoftwareVersion" }, { 0x06, 0x002b, "ProtocolVersion" }, { 0x06, 0x002c, "HardwareVersion" }, { 0x06, 0x002d, "Major" }, { 0x06, 0x002e, "Minor" }, { 0x06, 0x002f, "Revision" }, { 0x06, 0x0030, "Handedness" }, { 0x06, 0x0031, "EitherHand" }, { 0x06, 0x0032, "LeftHand" }, { 0x06, 0x0033, "RightHand" }, { 0x06, 0x0034, "BothHands" }, { 0x06, 0x0040, "GripPoseOffset" }, { 0x06, 0x0041, 
"PointerPoseOffset" }, { 0x07, 0, "KeyboardKeypad" }, { 0x07, 0x0001, "ErrorRollOver" }, { 0x07, 0x0002, "POSTFail" }, { 0x07, 0x0003, "ErrorUndefined" }, { 0x07, 0x0004, "KeyboardA" }, { 0x07, 0x0005, "KeyboardB" }, { 0x07, 0x0006, "KeyboardC" }, { 0x07, 0x0007, "KeyboardD" }, { 0x07, 0x0008, "KeyboardE" }, { 0x07, 0x0009, "KeyboardF" }, { 0x07, 0x000a, "KeyboardG" }, { 0x07, 0x000b, "KeyboardH" }, { 0x07, 0x000c, "KeyboardI" }, { 0x07, 0x000d, "KeyboardJ" }, { 0x07, 0x000e, "KeyboardK" }, { 0x07, 0x000f, "KeyboardL" }, { 0x07, 0x0010, "KeyboardM" }, { 0x07, 0x0011, "KeyboardN" }, { 0x07, 0x0012, "KeyboardO" }, { 0x07, 0x0013, "KeyboardP" }, { 0x07, 0x0014, "KeyboardQ" }, { 0x07, 0x0015, "KeyboardR" }, { 0x07, 0x0016, "KeyboardS" }, { 0x07, 0x0017, "KeyboardT" }, { 0x07, 0x0018, "KeyboardU" }, { 0x07, 0x0019, "KeyboardV" }, { 0x07, 0x001a, "KeyboardW" }, { 0x07, 0x001b, "KeyboardX" }, { 0x07, 0x001c, "KeyboardY" }, { 0x07, 0x001d, "KeyboardZ" }, { 0x07, 0x001e, "Keyboard1andBang" }, { 0x07, 0x001f, "Keyboard2andAt" }, { 0x07, 0x0020, "Keyboard3andHash" }, { 0x07, 0x0021, "Keyboard4andDollar" }, { 0x07, 0x0022, "Keyboard5andPercent" }, { 0x07, 0x0023, "Keyboard6andCaret" }, { 0x07, 0x0024, "Keyboard7andAmpersand" }, { 0x07, 0x0025, "Keyboard8andStar" }, { 0x07, 0x0026, "Keyboard9andLeftBracket" }, { 0x07, 0x0027, "Keyboard0andRightBracket" }, { 0x07, 0x0028, "KeyboardReturnEnter" }, { 0x07, 0x0029, "KeyboardEscape" }, { 0x07, 0x002a, "KeyboardDelete" }, { 0x07, 0x002b, "KeyboardTab" }, { 0x07, 0x002c, "KeyboardSpacebar" }, { 0x07, 0x002d, "KeyboardDashandUnderscore" }, { 0x07, 0x002e, "KeyboardEqualsandPlus" }, { 0x07, 0x002f, "KeyboardLeftBrace" }, { 0x07, 0x0030, "KeyboardRightBrace" }, { 0x07, 0x0031, "KeyboardBackslashandPipe" }, { 0x07, 0x0032, "KeyboardNonUSHashandTilde" }, { 0x07, 0x0033, "KeyboardSemiColonandColon" }, { 0x07, 0x0034, "KeyboardLeftAposandDouble" }, { 0x07, 0x0035, "KeyboardGraveAccentandTilde" }, { 0x07, 0x0036, "KeyboardCommaandLessThan" }, { 0x07, 0x0037, "KeyboardPeriodandGreaterThan" }, { 0x07, 0x0038, "KeyboardForwardSlashandQuestionMark" }, { 0x07, 0x0039, "KeyboardCapsLock" }, { 0x07, 0x003a, "KeyboardF1" }, { 0x07, 0x003b, "KeyboardF2" }, { 0x07, 0x003c, "KeyboardF3" }, { 0x07, 0x003d, "KeyboardF4" }, { 0x07, 0x003e, "KeyboardF5" }, { 0x07, 0x003f, "KeyboardF6" }, { 0x07, 0x0040, "KeyboardF7" }, { 0x07, 0x0041, "KeyboardF8" }, { 0x07, 0x0042, "KeyboardF9" }, { 0x07, 0x0043, "KeyboardF10" }, { 0x07, 0x0044, "KeyboardF11" }, { 0x07, 0x0045, "KeyboardF12" }, { 0x07, 0x0046, "KeyboardPrintScreen" }, { 0x07, 0x0047, "KeyboardScrollLock" }, { 0x07, 0x0048, "KeyboardPause" }, { 0x07, 0x0049, "KeyboardInsert" }, { 0x07, 0x004a, "KeyboardHome" }, { 0x07, 0x004b, "KeyboardPageUp" }, { 0x07, 0x004c, "KeyboardDeleteForward" }, { 0x07, 0x004d, "KeyboardEnd" }, { 0x07, 0x004e, "KeyboardPageDown" }, { 0x07, 0x004f, "KeyboardRightArrow" }, { 0x07, 0x0050, "KeyboardLeftArrow" }, { 0x07, 0x0051, "KeyboardDownArrow" }, { 0x07, 0x0052, "KeyboardUpArrow" }, { 0x07, 0x0053, "KeypadNumLockandClear" }, { 0x07, 0x0054, "KeypadForwardSlash" }, { 0x07, 0x0055, "KeypadStar" }, { 0x07, 0x0056, "KeypadDash" }, { 0x07, 0x0057, "KeypadPlus" }, { 0x07, 0x0058, "KeypadENTER" }, { 0x07, 0x0059, "Keypad1andEnd" }, { 0x07, 0x005a, "Keypad2andDownArrow" }, { 0x07, 0x005b, "Keypad3andPageDn" }, { 0x07, 0x005c, "Keypad4andLeftArrow" }, { 0x07, 0x005d, "Keypad5" }, { 0x07, 0x005e, "Keypad6andRightArrow" }, { 0x07, 0x005f, "Keypad7andHome" }, { 0x07, 0x0060, "Keypad8andUpArrow" }, { 0x07, 0x0061, 
"Keypad9andPageUp" }, { 0x07, 0x0062, "Keypad0andInsert" }, { 0x07, 0x0063, "KeypadPeriodandDelete" }, { 0x07, 0x0064, "KeyboardNonUSBackslashandPipe" }, { 0x07, 0x0065, "KeyboardApplication" }, { 0x07, 0x0066, "KeyboardPower" }, { 0x07, 0x0067, "KeypadEquals" }, { 0x07, 0x0068, "KeyboardF13" }, { 0x07, 0x0069, "KeyboardF14" }, { 0x07, 0x006a, "KeyboardF15" }, { 0x07, 0x006b, "KeyboardF16" }, { 0x07, 0x006c, "KeyboardF17" }, { 0x07, 0x006d, "KeyboardF18" }, { 0x07, 0x006e, "KeyboardF19" }, { 0x07, 0x006f, "KeyboardF20" }, { 0x07, 0x0070, "KeyboardF21" }, { 0x07, 0x0071, "KeyboardF22" }, { 0x07, 0x0072, "KeyboardF23" }, { 0x07, 0x0073, "KeyboardF24" }, { 0x07, 0x0074, "KeyboardExecute" }, { 0x07, 0x0075, "KeyboardHelp" }, { 0x07, 0x0076, "KeyboardMenu" }, { 0x07, 0x0077, "KeyboardSelect" }, { 0x07, 0x0078, "KeyboardStop" }, { 0x07, 0x0079, "KeyboardAgain" }, { 0x07, 0x007a, "KeyboardUndo" }, { 0x07, 0x007b, "KeyboardCut" }, { 0x07, 0x007c, "KeyboardCopy" }, { 0x07, 0x007d, "KeyboardPaste" }, { 0x07, 0x007e, "KeyboardFind" }, { 0x07, 0x007f, "KeyboardMute" }, { 0x07, 0x0080, "KeyboardVolumeUp" }, { 0x07, 0x0081, "KeyboardVolumeDown" }, { 0x07, 0x0082, "KeyboardLockingCapsLock" }, { 0x07, 0x0083, "KeyboardLockingNumLock" }, { 0x07, 0x0084, "KeyboardLockingScrollLock" }, { 0x07, 0x0085, "KeypadComma" }, { 0x07, 0x0086, "KeypadEqualSign" }, { 0x07, 0x0087, "KeyboardInternational1" }, { 0x07, 0x0088, "KeyboardInternational2" }, { 0x07, 0x0089, "KeyboardInternational3" }, { 0x07, 0x008a, "KeyboardInternational4" }, { 0x07, 0x008b, "KeyboardInternational5" }, { 0x07, 0x008c, "KeyboardInternational6" }, { 0x07, 0x008d, "KeyboardInternational7" }, { 0x07, 0x008e, "KeyboardInternational8" }, { 0x07, 0x008f, "KeyboardInternational9" }, { 0x07, 0x0090, "KeyboardLANG1" }, { 0x07, 0x0091, "KeyboardLANG2" }, { 0x07, 0x0092, "KeyboardLANG3" }, { 0x07, 0x0093, "KeyboardLANG4" }, { 0x07, 0x0094, "KeyboardLANG5" }, { 0x07, 0x0095, "KeyboardLANG6" }, { 0x07, 0x0096, "KeyboardLANG7" }, { 0x07, 0x0097, "KeyboardLANG8" }, { 0x07, 0x0098, "KeyboardLANG9" }, { 0x07, 0x0099, "KeyboardAlternateErase" }, { 0x07, 0x009a, "KeyboardSysReqAttention" }, { 0x07, 0x009b, "KeyboardCancel" }, { 0x07, 0x009c, "KeyboardClear" }, { 0x07, 0x009d, "KeyboardPrior" }, { 0x07, 0x009e, "KeyboardReturn" }, { 0x07, 0x009f, "KeyboardSeparator" }, { 0x07, 0x00a0, "KeyboardOut" }, { 0x07, 0x00a1, "KeyboardOper" }, { 0x07, 0x00a2, "KeyboardClearAgain" }, { 0x07, 0x00a3, "KeyboardCrSelProps" }, { 0x07, 0x00a4, "KeyboardExSel" }, { 0x07, 0x00b0, "KeypadDouble0" }, { 0x07, 0x00b1, "KeypadTriple0" }, { 0x07, 0x00b2, "ThousandsSeparator" }, { 0x07, 0x00b3, "DecimalSeparator" }, { 0x07, 0x00b4, "CurrencyUnit" }, { 0x07, 0x00b5, "CurrencySubunit" }, { 0x07, 0x00b6, "KeypadLeftBracket" }, { 0x07, 0x00b7, "KeypadRightBracket" }, { 0x07, 0x00b8, "KeypadLeftBrace" }, { 0x07, 0x00b9, "KeypadRightBrace" }, { 0x07, 0x00ba, "KeypadTab" }, { 0x07, 0x00bb, "KeypadBackspace" }, { 0x07, 0x00bc, "KeypadA" }, { 0x07, 0x00bd, "KeypadB" }, { 0x07, 0x00be, "KeypadC" }, { 0x07, 0x00bf, "KeypadD" }, { 0x07, 0x00c0, "KeypadE" }, { 0x07, 0x00c1, "KeypadF" }, { 0x07, 0x00c2, "KeypadXOR" }, { 0x07, 0x00c3, "KeypadCaret" }, { 0x07, 0x00c4, "KeypadPercentage" }, { 0x07, 0x00c5, "KeypadLess" }, { 0x07, 0x00c6, "KeypadGreater" }, { 0x07, 0x00c7, "KeypadAmpersand" }, { 0x07, 0x00c8, "KeypadDoubleAmpersand" }, { 0x07, 0x00c9, "KeypadBar" }, { 0x07, 0x00ca, "KeypadDoubleBar" }, { 0x07, 0x00cb, "KeypadColon" }, { 0x07, 0x00cc, "KeypadHash" }, { 0x07, 0x00cd, "KeypadSpace" }, { 
0x07, 0x00ce, "KeypadAt" }, { 0x07, 0x00cf, "KeypadBang" }, { 0x07, 0x00d0, "KeypadMemoryStore" }, { 0x07, 0x00d1, "KeypadMemoryRecall" }, { 0x07, 0x00d2, "KeypadMemoryClear" }, { 0x07, 0x00d3, "KeypadMemoryAdd" }, { 0x07, 0x00d4, "KeypadMemorySubtract" }, { 0x07, 0x00d5, "KeypadMemoryMultiply" }, { 0x07, 0x00d6, "KeypadMemoryDivide" }, { 0x07, 0x00d7, "KeypadPlusMinus" }, { 0x07, 0x00d8, "KeypadClear" }, { 0x07, 0x00d9, "KeypadClearEntry" }, { 0x07, 0x00da, "KeypadBinary" }, { 0x07, 0x00db, "KeypadOctal" }, { 0x07, 0x00dc, "KeypadDecimal" }, { 0x07, 0x00dd, "KeypadHexadecimal" }, { 0x07, 0x00e0, "KeyboardLeftControl" }, { 0x07, 0x00e1, "KeyboardLeftShift" }, { 0x07, 0x00e2, "KeyboardLeftAlt" }, { 0x07, 0x00e3, "KeyboardLeftGUI" }, { 0x07, 0x00e4, "KeyboardRightControl" }, { 0x07, 0x00e5, "KeyboardRightShift" }, { 0x07, 0x00e6, "KeyboardRightAlt" }, { 0x07, 0x00e7, "KeyboardRightGUI" }, { 0x08, 0, "LED" }, { 0x08, 0x0001, "NumLock" }, { 0x08, 0x0002, "CapsLock" }, { 0x08, 0x0003, "ScrollLock" }, { 0x08, 0x0004, "Compose" }, { 0x08, 0x0005, "Kana" }, { 0x08, 0x0006, "Power" }, { 0x08, 0x0007, "Shift" }, { 0x08, 0x0008, "DoNotDisturb" }, { 0x08, 0x0009, "Mute" }, { 0x08, 0x000a, "ToneEnable" }, { 0x08, 0x000b, "HighCutFilter" }, { 0x08, 0x000c, "LowCutFilter" }, { 0x08, 0x000d, "EqualizerEnable" }, { 0x08, 0x000e, "SoundFieldOn" }, { 0x08, 0x000f, "SurroundOn" }, { 0x08, 0x0010, "Repeat" }, { 0x08, 0x0011, "Stereo" }, { 0x08, 0x0012, "SamplingRateDetect" }, { 0x08, 0x0013, "Spinning" }, { 0x08, 0x0014, "CAV" }, { 0x08, 0x0015, "CLV" }, { 0x08, 0x0016, "RecordingFormatDetect" }, { 0x08, 0x0017, "OffHook" }, { 0x08, 0x0018, "Ring" }, { 0x08, 0x0019, "MessageWaiting" }, { 0x08, 0x001a, "DataMode" }, { 0x08, 0x001b, "BatteryOperation" }, { 0x08, 0x001c, "BatteryOK" }, { 0x08, 0x001d, "BatteryLow" }, { 0x08, 0x001e, "Speaker" }, { 0x08, 0x001f, "Headset" }, { 0x08, 0x0020, "Hold" }, { 0x08, 0x0021, "Microphone" }, { 0x08, 0x0022, "Coverage" }, { 0x08, 0x0023, "NightMode" }, { 0x08, 0x0024, "SendCalls" }, { 0x08, 0x0025, "CallPickup" }, { 0x08, 0x0026, "Conference" }, { 0x08, 0x0027, "Standby" }, { 0x08, 0x0028, "CameraOn" }, { 0x08, 0x0029, "CameraOff" }, { 0x08, 0x002a, "OnLine" }, { 0x08, 0x002b, "OffLine" }, { 0x08, 0x002c, "Busy" }, { 0x08, 0x002d, "Ready" }, { 0x08, 0x002e, "PaperOut" }, { 0x08, 0x002f, "PaperJam" }, { 0x08, 0x0030, "Remote" }, { 0x08, 0x0031, "Forward" }, { 0x08, 0x0032, "Reverse" }, { 0x08, 0x0033, "Stop" }, { 0x08, 0x0034, "Rewind" }, { 0x08, 0x0035, "FastForward" }, { 0x08, 0x0036, "Play" }, { 0x08, 0x0037, "Pause" }, { 0x08, 0x0038, "Record" }, { 0x08, 0x0039, "Error" }, { 0x08, 0x003a, "UsageSelectedIndicator" }, { 0x08, 0x003b, "UsageInUseIndicator" }, { 0x08, 0x003c, "UsageMultiModeIndicator" }, { 0x08, 0x003d, "IndicatorOn" }, { 0x08, 0x003e, "IndicatorFlash" }, { 0x08, 0x003f, "IndicatorSlowBlink" }, { 0x08, 0x0040, "IndicatorFastBlink" }, { 0x08, 0x0041, "IndicatorOff" }, { 0x08, 0x0042, "FlashOnTime" }, { 0x08, 0x0043, "SlowBlinkOnTime" }, { 0x08, 0x0044, "SlowBlinkOffTime" }, { 0x08, 0x0045, "FastBlinkOnTime" }, { 0x08, 0x0046, "FastBlinkOffTime" }, { 0x08, 0x0047, "UsageIndicatorColor" }, { 0x08, 0x0048, "IndicatorRed" }, { 0x08, 0x0049, "IndicatorGreen" }, { 0x08, 0x004a, "IndicatorAmber" }, { 0x08, 0x004b, "GenericIndicator" }, { 0x08, 0x004c, "SystemSuspend" }, { 0x08, 0x004d, "ExternalPowerConnected" }, { 0x08, 0x004e, "IndicatorBlue" }, { 0x08, 0x004f, "IndicatorOrange" }, { 0x08, 0x0050, "GoodStatus" }, { 0x08, 0x0051, "WarningStatus" }, { 0x08, 0x0052, 
"RGBLED" }, { 0x08, 0x0053, "RedLEDChannel" }, { 0x08, 0x0054, "BlueLEDChannel" }, { 0x08, 0x0055, "GreenLEDChannel" }, { 0x08, 0x0056, "LEDIntensity" }, { 0x08, 0x0057, "SystemMicrophoneMute" }, { 0x08, 0x0060, "PlayerIndicator" }, { 0x08, 0x0061, "Player1" }, { 0x08, 0x0062, "Player2" }, { 0x08, 0x0063, "Player3" }, { 0x08, 0x0064, "Player4" }, { 0x08, 0x0065, "Player5" }, { 0x08, 0x0066, "Player6" }, { 0x08, 0x0067, "Player7" }, { 0x08, 0x0068, "Player8" }, { 0x09, 0, "Button" }, { 0x0a, 0, "Ordinal" }, { 0x0b, 0, "TelephonyDevice" }, { 0x0b, 0x0001, "Phone" }, { 0x0b, 0x0002, "AnsweringMachine" }, { 0x0b, 0x0003, "MessageControls" }, { 0x0b, 0x0004, "Handset" }, { 0x0b, 0x0005, "Headset" }, { 0x0b, 0x0006, "TelephonyKeyPad" }, { 0x0b, 0x0007, "ProgrammableButton" }, { 0x0b, 0x0020, "HookSwitch" }, { 0x0b, 0x0021, "Flash" }, { 0x0b, 0x0022, "Feature" }, { 0x0b, 0x0023, "Hold" }, { 0x0b, 0x0024, "Redial" }, { 0x0b, 0x0025, "Transfer" }, { 0x0b, 0x0026, "Drop" }, { 0x0b, 0x0027, "Park" }, { 0x0b, 0x0028, "ForwardCalls" }, { 0x0b, 0x0029, "AlternateFunction" }, { 0x0b, 0x002a, "Line" }, { 0x0b, 0x002b, "SpeakerPhone" }, { 0x0b, 0x002c, "Conference" }, { 0x0b, 0x002d, "RingEnable" }, { 0x0b, 0x002e, "RingSelect" }, { 0x0b, 0x002f, "PhoneMute" }, { 0x0b, 0x0030, "CallerID" }, { 0x0b, 0x0031, "Send" }, { 0x0b, 0x0050, "SpeedDial" }, { 0x0b, 0x0051, "StoreNumber" }, { 0x0b, 0x0052, "RecallNumber" }, { 0x0b, 0x0053, "PhoneDirectory" }, { 0x0b, 0x0070, "VoiceMail" }, { 0x0b, 0x0071, "ScreenCalls" }, { 0x0b, 0x0072, "DoNotDisturb" }, { 0x0b, 0x0073, "Message" }, { 0x0b, 0x0074, "AnswerOnOff" }, { 0x0b, 0x0090, "InsideDialTone" }, { 0x0b, 0x0091, "OutsideDialTone" }, { 0x0b, 0x0092, "InsideRingTone" }, { 0x0b, 0x0093, "OutsideRingTone" }, { 0x0b, 0x0094, "PriorityRingTone" }, { 0x0b, 0x0095, "InsideRingback" }, { 0x0b, 0x0096, "PriorityRingback" }, { 0x0b, 0x0097, "LineBusyTone" }, { 0x0b, 0x0098, "ReorderTone" }, { 0x0b, 0x0099, "CallWaitingTone" }, { 0x0b, 0x009a, "ConfirmationTone1" }, { 0x0b, 0x009b, "ConfirmationTone2" }, { 0x0b, 0x009c, "TonesOff" }, { 0x0b, 0x009d, "OutsideRingback" }, { 0x0b, 0x009e, "Ringer" }, { 0x0b, 0x00b0, "PhoneKey0" }, { 0x0b, 0x00b1, "PhoneKey1" }, { 0x0b, 0x00b2, "PhoneKey2" }, { 0x0b, 0x00b3, "PhoneKey3" }, { 0x0b, 0x00b4, "PhoneKey4" }, { 0x0b, 0x00b5, "PhoneKey5" }, { 0x0b, 0x00b6, "PhoneKey6" }, { 0x0b, 0x00b7, "PhoneKey7" }, { 0x0b, 0x00b8, "PhoneKey8" }, { 0x0b, 0x00b9, "PhoneKey9" }, { 0x0b, 0x00ba, "PhoneKeyStar" }, { 0x0b, 0x00bb, "PhoneKeyPound" }, { 0x0b, 0x00bc, "PhoneKeyA" }, { 0x0b, 0x00bd, "PhoneKeyB" }, { 0x0b, 0x00be, "PhoneKeyC" }, { 0x0b, 0x00bf, "PhoneKeyD" }, { 0x0b, 0x00c0, "PhoneCallHistoryKey" }, { 0x0b, 0x00c1, "PhoneCallerIDKey" }, { 0x0b, 0x00c2, "PhoneSettingsKey" }, { 0x0b, 0x00f0, "HostControl" }, { 0x0b, 0x00f1, "HostAvailable" }, { 0x0b, 0x00f2, "HostCallActive" }, { 0x0b, 0x00f3, "ActivateHandsetAudio" }, { 0x0b, 0x00f4, "RingType" }, { 0x0b, 0x00f5, "RedialablePhoneNumber" }, { 0x0b, 0x00f8, "StopRingTone" }, { 0x0b, 0x00f9, "PSTNRingTone" }, { 0x0b, 0x00fa, "HostRingTone" }, { 0x0b, 0x00fb, "AlertSoundError" }, { 0x0b, 0x00fc, "AlertSoundConfirm" }, { 0x0b, 0x00fd, "AlertSoundNotification" }, { 0x0b, 0x00fe, "SilentRing" }, { 0x0b, 0x0108, "EmailMessageWaiting" }, { 0x0b, 0x0109, "VoicemailMessageWaiting" }, { 0x0b, 0x010a, "HostHold" }, { 0x0b, 0x0110, "IncomingCallHistoryCount" }, { 0x0b, 0x0111, "OutgoingCallHistoryCount" }, { 0x0b, 0x0112, "IncomingCallHistory" }, { 0x0b, 0x0113, "OutgoingCallHistory" }, { 0x0b, 0x0114, 
"PhoneLocale" }, { 0x0b, 0x0140, "PhoneTimeSecond" }, { 0x0b, 0x0141, "PhoneTimeMinute" }, { 0x0b, 0x0142, "PhoneTimeHour" }, { 0x0b, 0x0143, "PhoneDateDay" }, { 0x0b, 0x0144, "PhoneDateMonth" }, { 0x0b, 0x0145, "PhoneDateYear" }, { 0x0b, 0x0146, "HandsetNickname" }, { 0x0b, 0x0147, "AddressBookID" }, { 0x0b, 0x014a, "CallDuration" }, { 0x0b, 0x014b, "DualModePhone" }, { 0x0c, 0, "Consumer" }, { 0x0c, 0x0001, "ConsumerControl" }, { 0x0c, 0x0002, "NumericKeyPad" }, { 0x0c, 0x0003, "ProgrammableButtons" }, { 0x0c, 0x0004, "Microphone" }, { 0x0c, 0x0005, "Headphone" }, { 0x0c, 0x0006, "GraphicEqualizer" }, { 0x0c, 0x0020, "10" }, { 0x0c, 0x0021, "100" }, { 0x0c, 0x0022, "AMPM" }, { 0x0c, 0x0030, "Power" }, { 0x0c, 0x0031, "Reset" }, { 0x0c, 0x0032, "Sleep" }, { 0x0c, 0x0033, "SleepAfter" }, { 0x0c, 0x0034, "SleepMode" }, { 0x0c, 0x0035, "Illumination" }, { 0x0c, 0x0036, "FunctionButtons" }, { 0x0c, 0x0040, "Menu" }, { 0x0c, 0x0041, "MenuPick" }, { 0x0c, 0x0042, "MenuUp" }, { 0x0c, 0x0043, "MenuDown" }, { 0x0c, 0x0044, "MenuLeft" }, { 0x0c, 0x0045, "MenuRight" }, { 0x0c, 0x0046, "MenuEscape" }, { 0x0c, 0x0047, "MenuValueIncrease" }, { 0x0c, 0x0048, "MenuValueDecrease" }, { 0x0c, 0x0060, "DataOnScreen" }, { 0x0c, 0x0061, "ClosedCaption" }, { 0x0c, 0x0062, "ClosedCaptionSelect" }, { 0x0c, 0x0063, "VCRTV" }, { 0x0c, 0x0064, "BroadcastMode" }, { 0x0c, 0x0065, "Snapshot" }, { 0x0c, 0x0066, "Still" }, { 0x0c, 0x0067, "PictureinPictureToggle" }, { 0x0c, 0x0068, "PictureinPictureSwap" }, { 0x0c, 0x0069, "RedMenuButton" }, { 0x0c, 0x006a, "GreenMenuButton" }, { 0x0c, 0x006b, "BlueMenuButton" }, { 0x0c, 0x006c, "YellowMenuButton" }, { 0x0c, 0x006d, "Aspect" }, { 0x0c, 0x006e, "3DModeSelect" }, { 0x0c, 0x006f, "DisplayBrightnessIncrement" }, { 0x0c, 0x0070, "DisplayBrightnessDecrement" }, { 0x0c, 0x0071, "DisplayBrightness" }, { 0x0c, 0x0072, "DisplayBacklightToggle" }, { 0x0c, 0x0073, "DisplaySetBrightnesstoMinimum" }, { 0x0c, 0x0074, "DisplaySetBrightnesstoMaximum" }, { 0x0c, 0x0075, "DisplaySetAutoBrightness" }, { 0x0c, 0x0076, "CameraAccessEnabled" }, { 0x0c, 0x0077, "CameraAccessDisabled" }, { 0x0c, 0x0078, "CameraAccessToggle" }, { 0x0c, 0x0079, "KeyboardBrightnessIncrement" }, { 0x0c, 0x007a, "KeyboardBrightnessDecrement" }, { 0x0c, 0x007b, "KeyboardBacklightSetLevel" }, { 0x0c, 0x007c, "KeyboardBacklightOOC" }, { 0x0c, 0x007d, "KeyboardBacklightSetMinimum" }, { 0x0c, 0x007e, "KeyboardBacklightSetMaximum" }, { 0x0c, 0x007f, "KeyboardBacklightAuto" }, { 0x0c, 0x0080, "Selection" }, { 0x0c, 0x0081, "AssignSelection" }, { 0x0c, 0x0082, "ModeStep" }, { 0x0c, 0x0083, "RecallLast" }, { 0x0c, 0x0084, "EnterChannel" }, { 0x0c, 0x0085, "OrderMovie" }, { 0x0c, 0x0086, "Channel" }, { 0x0c, 0x0087, "MediaSelection" }, { 0x0c, 0x0088, "MediaSelectComputer" }, { 0x0c, 0x0089, "MediaSelectTV" }, { 0x0c, 0x008a, "MediaSelectWWW" }, { 0x0c, 0x008b, "MediaSelectDVD" }, { 0x0c, 0x008c, "MediaSelectTelephone" }, { 0x0c, 0x008d, "MediaSelectProgramGuide" }, { 0x0c, 0x008e, "MediaSelectVideoPhone" }, { 0x0c, 0x008f, "MediaSelectGames" }, { 0x0c, 0x0090, "MediaSelectMessages" }, { 0x0c, 0x0091, "MediaSelectCD" }, { 0x0c, 0x0092, "MediaSelectVCR" }, { 0x0c, 0x0093, "MediaSelectTuner" }, { 0x0c, 0x0094, "Quit" }, { 0x0c, 0x0095, "Help" }, { 0x0c, 0x0096, "MediaSelectTape" }, { 0x0c, 0x0097, "MediaSelectCable" }, { 0x0c, 0x0098, "MediaSelectSatellite" }, { 0x0c, 0x0099, "MediaSelectSecurity" }, { 0x0c, 0x009a, "MediaSelectHome" }, { 0x0c, 0x009b, "MediaSelectCall" }, { 0x0c, 0x009c, "ChannelIncrement" }, { 0x0c, 0x009d, 
"ChannelDecrement" }, { 0x0c, 0x009e, "MediaSelectSAP" }, { 0x0c, 0x00a0, "VCRPlus" }, { 0x0c, 0x00a1, "Once" }, { 0x0c, 0x00a2, "Daily" }, { 0x0c, 0x00a3, "Weekly" }, { 0x0c, 0x00a4, "Monthly" }, { 0x0c, 0x00b0, "Play" }, { 0x0c, 0x00b1, "Pause" }, { 0x0c, 0x00b2, "Record" }, { 0x0c, 0x00b3, "FastForward" }, { 0x0c, 0x00b4, "Rewind" }, { 0x0c, 0x00b5, "ScanNextTrack" }, { 0x0c, 0x00b6, "ScanPreviousTrack" }, { 0x0c, 0x00b7, "Stop" }, { 0x0c, 0x00b8, "Eject" }, { 0x0c, 0x00b9, "RandomPlay" }, { 0x0c, 0x00ba, "SelectDisc" }, { 0x0c, 0x00bb, "EnterDisc" }, { 0x0c, 0x00bc, "Repeat" }, { 0x0c, 0x00bd, "Tracking" }, { 0x0c, 0x00be, "TrackNormal" }, { 0x0c, 0x00bf, "SlowTracking" }, { 0x0c, 0x00c0, "FrameForward" }, { 0x0c, 0x00c1, "FrameBack" }, { 0x0c, 0x00c2, "Mark" }, { 0x0c, 0x00c3, "ClearMark" }, { 0x0c, 0x00c4, "RepeatFromMark" }, { 0x0c, 0x00c5, "ReturnToMark" }, { 0x0c, 0x00c6, "SearchMarkForward" }, { 0x0c, 0x00c7, "SearchMarkBackwards" }, { 0x0c, 0x00c8, "CounterReset" }, { 0x0c, 0x00c9, "ShowCounter" }, { 0x0c, 0x00ca, "TrackingIncrement" }, { 0x0c, 0x00cb, "TrackingDecrement" }, { 0x0c, 0x00cc, "StopEject" }, { 0x0c, 0x00cd, "PlayPause" }, { 0x0c, 0x00ce, "PlaySkip" }, { 0x0c, 0x00cf, "VoiceCommand" }, { 0x0c, 0x00d0, "InvokeCaptureInterface" }, { 0x0c, 0x00d1, "StartorStopGameRecording" }, { 0x0c, 0x00d2, "HistoricalGameCapture" }, { 0x0c, 0x00d3, "CaptureGameScreenshot" }, { 0x0c, 0x00d4, "ShoworHideRecordingIndicator" }, { 0x0c, 0x00d5, "StartorStopMicrophoneCapture" }, { 0x0c, 0x00d6, "StartorStopCameraCapture" }, { 0x0c, 0x00d7, "StartorStopGameBroadcast" }, { 0x0c, 0x00d8, "StartorStopVoiceDictationSession" }, { 0x0c, 0x00d9, "InvokeDismissEmojiPicker" }, { 0x0c, 0x00e0, "Volume" }, { 0x0c, 0x00e1, "Balance" }, { 0x0c, 0x00e2, "Mute" }, { 0x0c, 0x00e3, "Bass" }, { 0x0c, 0x00e4, "Treble" }, { 0x0c, 0x00e5, "BassBoost" }, { 0x0c, 0x00e6, "SurroundMode" }, { 0x0c, 0x00e7, "Loudness" }, { 0x0c, 0x00e8, "MPX" }, { 0x0c, 0x00e9, "VolumeIncrement" }, { 0x0c, 0x00ea, "VolumeDecrement" }, { 0x0c, 0x00f0, "SpeedSelect" }, { 0x0c, 0x00f1, "PlaybackSpeed" }, { 0x0c, 0x00f2, "StandardPlay" }, { 0x0c, 0x00f3, "LongPlay" }, { 0x0c, 0x00f4, "ExtendedPlay" }, { 0x0c, 0x00f5, "Slow" }, { 0x0c, 0x0100, "FanEnable" }, { 0x0c, 0x0101, "FanSpeed" }, { 0x0c, 0x0102, "LightEnable" }, { 0x0c, 0x0103, "LightIlluminationLevel" }, { 0x0c, 0x0104, "ClimateControlEnable" }, { 0x0c, 0x0105, "RoomTemperature" }, { 0x0c, 0x0106, "SecurityEnable" }, { 0x0c, 0x0107, "FireAlarm" }, { 0x0c, 0x0108, "PoliceAlarm" }, { 0x0c, 0x0109, "Proximity" }, { 0x0c, 0x010a, "Motion" }, { 0x0c, 0x010b, "DuressAlarm" }, { 0x0c, 0x010c, "HoldupAlarm" }, { 0x0c, 0x010d, "MedicalAlarm" }, { 0x0c, 0x0150, "BalanceRight" }, { 0x0c, 0x0151, "BalanceLeft" }, { 0x0c, 0x0152, "BassIncrement" }, { 0x0c, 0x0153, "BassDecrement" }, { 0x0c, 0x0154, "TrebleIncrement" }, { 0x0c, 0x0155, "TrebleDecrement" }, { 0x0c, 0x0160, "SpeakerSystem" }, { 0x0c, 0x0161, "ChannelLeft" }, { 0x0c, 0x0162, "ChannelRight" }, { 0x0c, 0x0163, "ChannelCenter" }, { 0x0c, 0x0164, "ChannelFront" }, { 0x0c, 0x0165, "ChannelCenterFront" }, { 0x0c, 0x0166, "ChannelSide" }, { 0x0c, 0x0167, "ChannelSurround" }, { 0x0c, 0x0168, "ChannelLowFrequencyEnhancement" }, { 0x0c, 0x0169, "ChannelTop" }, { 0x0c, 0x016a, "ChannelUnknown" }, { 0x0c, 0x0170, "Subchannel" }, { 0x0c, 0x0171, "SubchannelIncrement" }, { 0x0c, 0x0172, "SubchannelDecrement" }, { 0x0c, 0x0173, "AlternateAudioIncrement" }, { 0x0c, 0x0174, "AlternateAudioDecrement" }, { 0x0c, 0x0180, "ApplicationLaunchButtons" 
}, { 0x0c, 0x0181, "ALLaunchButtonConfigurationTool" }, { 0x0c, 0x0182, "ALProgrammableButtonConfiguration" }, { 0x0c, 0x0183, "ALConsumerControlConfiguration" }, { 0x0c, 0x0184, "ALWordProcessor" }, { 0x0c, 0x0185, "ALTextEditor" }, { 0x0c, 0x0186, "ALSpreadsheet" }, { 0x0c, 0x0187, "ALGraphicsEditor" }, { 0x0c, 0x0188, "ALPresentationApp" }, { 0x0c, 0x0189, "ALDatabaseApp" }, { 0x0c, 0x018a, "ALEmailReader" }, { 0x0c, 0x018b, "ALNewsreader" }, { 0x0c, 0x018c, "ALVoicemail" }, { 0x0c, 0x018d, "ALContactsAddressBook" }, { 0x0c, 0x018e, "ALCalendarSchedule" }, { 0x0c, 0x018f, "ALTaskProjectManager" }, { 0x0c, 0x0190, "ALLogJournalTimecard" }, { 0x0c, 0x0191, "ALCheckbookFinance" }, { 0x0c, 0x0192, "ALCalculator" }, { 0x0c, 0x0193, "ALAVCapturePlayback" }, { 0x0c, 0x0194, "ALLocalMachineBrowser" }, { 0x0c, 0x0195, "ALLANWANBrowser" }, { 0x0c, 0x0196, "ALInternetBrowser" }, { 0x0c, 0x0197, "ALRemoteNetworkingISPConnect" }, { 0x0c, 0x0198, "ALNetworkConference" }, { 0x0c, 0x0199, "ALNetworkChat" }, { 0x0c, 0x019a, "ALTelephonyDialer" }, { 0x0c, 0x019b, "ALLogon" }, { 0x0c, 0x019c, "ALLogoff" }, { 0x0c, 0x019d, "ALLogonLogoff" }, { 0x0c, 0x019e, "ALTerminalLockScreensaver" }, { 0x0c, 0x019f, "ALControlPanel" }, { 0x0c, 0x01a0, "ALCommandLineProcessorRun" }, { 0x0c, 0x01a1, "ALProcessTaskManager" }, { 0x0c, 0x01a2, "ALSelectTaskApplication" }, { 0x0c, 0x01a3, "ALNextTaskApplication" }, { 0x0c, 0x01a4, "ALPreviousTaskApplication" }, { 0x0c, 0x01a5, "ALPreemptiveHaltTaskApplication" }, { 0x0c, 0x01a6, "ALIntegratedHelpCenter" }, { 0x0c, 0x01a7, "ALDocuments" }, { 0x0c, 0x01a8, "ALThesaurus" }, { 0x0c, 0x01a9, "ALDictionary" }, { 0x0c, 0x01aa, "ALDesktop" }, { 0x0c, 0x01ab, "ALSpellCheck" }, { 0x0c, 0x01ac, "ALGrammarCheck" }, { 0x0c, 0x01ad, "ALWirelessStatus" }, { 0x0c, 0x01ae, "ALKeyboardLayout" }, { 0x0c, 0x01af, "ALVirusProtection" }, { 0x0c, 0x01b0, "ALEncryption" }, { 0x0c, 0x01b1, "ALScreenSaver" }, { 0x0c, 0x01b2, "ALAlarms" }, { 0x0c, 0x01b3, "ALClock" }, { 0x0c, 0x01b4, "ALFileBrowser" }, { 0x0c, 0x01b5, "ALPowerStatus" }, { 0x0c, 0x01b6, "ALImageBrowser" }, { 0x0c, 0x01b7, "ALAudioBrowser" }, { 0x0c, 0x01b8, "ALMovieBrowser" }, { 0x0c, 0x01b9, "ALDigitalRightsManager" }, { 0x0c, 0x01ba, "ALDigitalWallet" }, { 0x0c, 0x01bc, "ALInstantMessaging" }, { 0x0c, 0x01bd, "ALOEMFeaturesTipsTutorialBrowser" }, { 0x0c, 0x01be, "ALOEMHelp" }, { 0x0c, 0x01bf, "ALOnlineCommunity" }, { 0x0c, 0x01c0, "ALEntertainmentContentBrowser" }, { 0x0c, 0x01c1, "ALOnlineShoppingBrowser" }, { 0x0c, 0x01c2, "ALSmartCardInformationHelp" }, { 0x0c, 0x01c3, "ALMarketMonitorFinanceBrowser" }, { 0x0c, 0x01c4, "ALCustomizedCorporateNewsBrowser" }, { 0x0c, 0x01c5, "ALOnlineActivityBrowser" }, { 0x0c, 0x01c6, "ALResearchSearchBrowser" }, { 0x0c, 0x01c7, "ALAudioPlayer" }, { 0x0c, 0x01c8, "ALMessageStatus" }, { 0x0c, 0x01c9, "ALContactSync" }, { 0x0c, 0x01ca, "ALNavigation" }, { 0x0c, 0x01cb, "ALContextawareDesktopAssistant" }, { 0x0c, 0x0200, "GenericGUIApplicationControls" }, { 0x0c, 0x0201, "ACNew" }, { 0x0c, 0x0202, "ACOpen" }, { 0x0c, 0x0203, "ACClose" }, { 0x0c, 0x0204, "ACExit" }, { 0x0c, 0x0205, "ACMaximize" }, { 0x0c, 0x0206, "ACMinimize" }, { 0x0c, 0x0207, "ACSave" }, { 0x0c, 0x0208, "ACPrint" }, { 0x0c, 0x0209, "ACProperties" }, { 0x0c, 0x021a, "ACUndo" }, { 0x0c, 0x021b, "ACCopy" }, { 0x0c, 0x021c, "ACCut" }, { 0x0c, 0x021d, "ACPaste" }, { 0x0c, 0x021e, "ACSelectAll" }, { 0x0c, 0x021f, "ACFind" }, { 0x0c, 0x0220, "ACFindandReplace" }, { 0x0c, 0x0221, "ACSearch" }, { 0x0c, 0x0222, "ACGoTo" }, { 0x0c, 0x0223, 
"ACHome" }, { 0x0c, 0x0224, "ACBack" }, { 0x0c, 0x0225, "ACForward" }, { 0x0c, 0x0226, "ACStop" }, { 0x0c, 0x0227, "ACRefresh" }, { 0x0c, 0x0228, "ACPreviousLink" }, { 0x0c, 0x0229, "ACNextLink" }, { 0x0c, 0x022a, "ACBookmarks" }, { 0x0c, 0x022b, "ACHistory" }, { 0x0c, 0x022c, "ACSubscriptions" }, { 0x0c, 0x022d, "ACZoomIn" }, { 0x0c, 0x022e, "ACZoomOut" }, { 0x0c, 0x022f, "ACZoom" }, { 0x0c, 0x0230, "ACFullScreenView" }, { 0x0c, 0x0231, "ACNormalView" }, { 0x0c, 0x0232, "ACViewToggle" }, { 0x0c, 0x0233, "ACScrollUp" }, { 0x0c, 0x0234, "ACScrollDown" }, { 0x0c, 0x0235, "ACScroll" }, { 0x0c, 0x0236, "ACPanLeft" }, { 0x0c, 0x0237, "ACPanRight" }, { 0x0c, 0x0238, "ACPan" }, { 0x0c, 0x0239, "ACNewWindow" }, { 0x0c, 0x023a, "ACTileHorizontally" }, { 0x0c, 0x023b, "ACTileVertically" }, { 0x0c, 0x023c, "ACFormat" }, { 0x0c, 0x023d, "ACEdit" }, { 0x0c, 0x023e, "ACBold" }, { 0x0c, 0x023f, "ACItalics" }, { 0x0c, 0x0240, "ACUnderline" }, { 0x0c, 0x0241, "ACStrikethrough" }, { 0x0c, 0x0242, "ACSubscript" }, { 0x0c, 0x0243, "ACSuperscript" }, { 0x0c, 0x0244, "ACAllCaps" }, { 0x0c, 0x0245, "ACRotate" }, { 0x0c, 0x0246, "ACResize" }, { 0x0c, 0x0247, "ACFlipHorizontal" }, { 0x0c, 0x0248, "ACFlipVertical" }, { 0x0c, 0x0249, "ACMirrorHorizontal" }, { 0x0c, 0x024a, "ACMirrorVertical" }, { 0x0c, 0x024b, "ACFontSelect" }, { 0x0c, 0x024c, "ACFontColor" }, { 0x0c, 0x024d, "ACFontSize" }, { 0x0c, 0x024e, "ACJustifyLeft" }, { 0x0c, 0x024f, "ACJustifyCenterH" }, { 0x0c, 0x0250, "ACJustifyRight" }, { 0x0c, 0x0251, "ACJustifyBlockH" }, { 0x0c, 0x0252, "ACJustifyTop" }, { 0x0c, 0x0253, "ACJustifyCenterV" }, { 0x0c, 0x0254, "ACJustifyBottom" }, { 0x0c, 0x0255, "ACJustifyBlockV" }, { 0x0c, 0x0256, "ACIndentDecrease" }, { 0x0c, 0x0257, "ACIndentIncrease" }, { 0x0c, 0x0258, "ACNumberedList" }, { 0x0c, 0x0259, "ACRestartNumbering" }, { 0x0c, 0x025a, "ACBulletedList" }, { 0x0c, 0x025b, "ACPromote" }, { 0x0c, 0x025c, "ACDemote" }, { 0x0c, 0x025d, "ACYes" }, { 0x0c, 0x025e, "ACNo" }, { 0x0c, 0x025f, "ACCancel" }, { 0x0c, 0x0260, "ACCatalog" }, { 0x0c, 0x0261, "ACBuyCheckout" }, { 0x0c, 0x0262, "ACAddtoCart" }, { 0x0c, 0x0263, "ACExpand" }, { 0x0c, 0x0264, "ACExpandAll" }, { 0x0c, 0x0265, "ACCollapse" }, { 0x0c, 0x0266, "ACCollapseAll" }, { 0x0c, 0x0267, "ACPrintPreview" }, { 0x0c, 0x0268, "ACPasteSpecial" }, { 0x0c, 0x0269, "ACInsertMode" }, { 0x0c, 0x026a, "ACDelete" }, { 0x0c, 0x026b, "ACLock" }, { 0x0c, 0x026c, "ACUnlock" }, { 0x0c, 0x026d, "ACProtect" }, { 0x0c, 0x026e, "ACUnprotect" }, { 0x0c, 0x026f, "ACAttachComment" }, { 0x0c, 0x0270, "ACDeleteComment" }, { 0x0c, 0x0271, "ACViewComment" }, { 0x0c, 0x0272, "ACSelectWord" }, { 0x0c, 0x0273, "ACSelectSentence" }, { 0x0c, 0x0274, "ACSelectParagraph" }, { 0x0c, 0x0275, "ACSelectColumn" }, { 0x0c, 0x0276, "ACSelectRow" }, { 0x0c, 0x0277, "ACSelectTable" }, { 0x0c, 0x0278, "ACSelectObject" }, { 0x0c, 0x0279, "ACRedoRepeat" }, { 0x0c, 0x027a, "ACSort" }, { 0x0c, 0x027b, "ACSortAscending" }, { 0x0c, 0x027c, "ACSortDescending" }, { 0x0c, 0x027d, "ACFilter" }, { 0x0c, 0x027e, "ACSetClock" }, { 0x0c, 0x027f, "ACViewClock" }, { 0x0c, 0x0280, "ACSelectTimeZone" }, { 0x0c, 0x0281, "ACEditTimeZones" }, { 0x0c, 0x0282, "ACSetAlarm" }, { 0x0c, 0x0283, "ACClearAlarm" }, { 0x0c, 0x0284, "ACSnoozeAlarm" }, { 0x0c, 0x0285, "ACResetAlarm" }, { 0x0c, 0x0286, "ACSynchronize" }, { 0x0c, 0x0287, "ACSendReceive" }, { 0x0c, 0x0288, "ACSendTo" }, { 0x0c, 0x0289, "ACReply" }, { 0x0c, 0x028a, "ACReplyAll" }, { 0x0c, 0x028b, "ACForwardMsg" }, { 0x0c, 0x028c, "ACSend" }, { 0x0c, 0x028d, "ACAttachFile" 
}, { 0x0c, 0x028e, "ACUpload" }, { 0x0c, 0x028f, "ACDownloadSaveTargetAs" }, { 0x0c, 0x0290, "ACSetBorders" }, { 0x0c, 0x0291, "ACInsertRow" }, { 0x0c, 0x0292, "ACInsertColumn" }, { 0x0c, 0x0293, "ACInsertFile" }, { 0x0c, 0x0294, "ACInsertPicture" }, { 0x0c, 0x0295, "ACInsertObject" }, { 0x0c, 0x0296, "ACInsertSymbol" }, { 0x0c, 0x0297, "ACSaveandClose" }, { 0x0c, 0x0298, "ACRename" }, { 0x0c, 0x0299, "ACMerge" }, { 0x0c, 0x029a, "ACSplit" }, { 0x0c, 0x029b, "ACDistributeHorizontally" }, { 0x0c, 0x029c, "ACDistributeVertically" }, { 0x0c, 0x029d, "ACNextKeyboardLayoutSelect" }, { 0x0c, 0x029e, "ACNavigationGuidance" }, { 0x0c, 0x029f, "ACDesktopShowAllWindows" }, { 0x0c, 0x02a0, "ACSoftKeyLeft" }, { 0x0c, 0x02a1, "ACSoftKeyRight" }, { 0x0c, 0x02a2, "ACDesktopShowAllApplications" }, { 0x0c, 0x02b0, "ACIdleKeepAlive" }, { 0x0c, 0x02c0, "ExtendedKeyboardAttributesCollection" }, { 0x0c, 0x02c1, "KeyboardFormFactor" }, { 0x0c, 0x02c2, "KeyboardKeyType" }, { 0x0c, 0x02c3, "KeyboardPhysicalLayout" }, { 0x0c, 0x02c4, "VendorSpecificKeyboardPhysicalLayout" }, { 0x0c, 0x02c5, "KeyboardIETFLanguageTagIndex" }, { 0x0c, 0x02c6, "ImplementedKeyboardInputAssistControls" }, { 0x0c, 0x02c7, "KeyboardInputAssistPrevious" }, { 0x0c, 0x02c8, "KeyboardInputAssistNext" }, { 0x0c, 0x02c9, "KeyboardInputAssistPreviousGroup" }, { 0x0c, 0x02ca, "KeyboardInputAssistNextGroup" }, { 0x0c, 0x02cb, "KeyboardInputAssistAccept" }, { 0x0c, 0x02cc, "KeyboardInputAssistCancel" }, { 0x0c, 0x02d0, "PrivacyScreenToggle" }, { 0x0c, 0x02d1, "PrivacyScreenLevelDecrement" }, { 0x0c, 0x02d2, "PrivacyScreenLevelIncrement" }, { 0x0c, 0x02d3, "PrivacyScreenLevelMinimum" }, { 0x0c, 0x02d4, "PrivacyScreenLevelMaximum" }, { 0x0c, 0x0500, "ContactEdited" }, { 0x0c, 0x0501, "ContactAdded" }, { 0x0c, 0x0502, "ContactRecordActive" }, { 0x0c, 0x0503, "ContactIndex" }, { 0x0c, 0x0504, "ContactNickname" }, { 0x0c, 0x0505, "ContactFirstName" }, { 0x0c, 0x0506, "ContactLastName" }, { 0x0c, 0x0507, "ContactFullName" }, { 0x0c, 0x0508, "ContactPhoneNumberPersonal" }, { 0x0c, 0x0509, "ContactPhoneNumberBusiness" }, { 0x0c, 0x050a, "ContactPhoneNumberMobile" }, { 0x0c, 0x050b, "ContactPhoneNumberPager" }, { 0x0c, 0x050c, "ContactPhoneNumberFax" }, { 0x0c, 0x050d, "ContactPhoneNumberOther" }, { 0x0c, 0x050e, "ContactEmailPersonal" }, { 0x0c, 0x050f, "ContactEmailBusiness" }, { 0x0c, 0x0510, "ContactEmailOther" }, { 0x0c, 0x0511, "ContactEmailMain" }, { 0x0c, 0x0512, "ContactSpeedDialNumber" }, { 0x0c, 0x0513, "ContactStatusFlag" }, { 0x0c, 0x0514, "ContactMisc" }, { 0x0d, 0, "Digitizers" }, { 0x0d, 0x0001, "Digitizer" }, { 0x0d, 0x0002, "Pen" }, { 0x0d, 0x0003, "LightPen" }, { 0x0d, 0x0004, "TouchScreen" }, { 0x0d, 0x0005, "TouchPad" }, { 0x0d, 0x0006, "Whiteboard" }, { 0x0d, 0x0007, "CoordinateMeasuringMachine" }, { 0x0d, 0x0008, "3DDigitizer" }, { 0x0d, 0x0009, "StereoPlotter" }, { 0x0d, 0x000a, "ArticulatedArm" }, { 0x0d, 0x000b, "Armature" }, { 0x0d, 0x000c, "MultiplePointDigitizer" }, { 0x0d, 0x000d, "FreeSpaceWand" }, { 0x0d, 0x000e, "DeviceConfiguration" }, { 0x0d, 0x000f, "CapacitiveHeatMapDigitizer" }, { 0x0d, 0x0020, "Stylus" }, { 0x0d, 0x0021, "Puck" }, { 0x0d, 0x0022, "Finger" }, { 0x0d, 0x0023, "Devicesettings" }, { 0x0d, 0x0024, "CharacterGesture" }, { 0x0d, 0x0030, "TipPressure" }, { 0x0d, 0x0031, "BarrelPressure" }, { 0x0d, 0x0032, "InRange" }, { 0x0d, 0x0033, "Touch" }, { 0x0d, 0x0034, "Untouch" }, { 0x0d, 0x0035, "Tap" }, { 0x0d, 0x0036, "Quality" }, { 0x0d, 0x0037, "DataValid" }, { 0x0d, 0x0038, "TransducerIndex" }, { 0x0d, 0x0039,
"TabletFunctionKeys" }, { 0x0d, 0x003a, "ProgramChangeKeys" }, { 0x0d, 0x003b, "BatteryStrength" }, { 0x0d, 0x003c, "Invert" }, { 0x0d, 0x003d, "XTilt" }, { 0x0d, 0x003e, "YTilt" }, { 0x0d, 0x003f, "Azimuth" }, { 0x0d, 0x0040, "Altitude" }, { 0x0d, 0x0041, "Twist" }, { 0x0d, 0x0042, "TipSwitch" }, { 0x0d, 0x0043, "SecondaryTipSwitch" }, { 0x0d, 0x0044, "BarrelSwitch" }, { 0x0d, 0x0045, "Eraser" }, { 0x0d, 0x0046, "TabletPick" }, { 0x0d, 0x0047, "TouchValid" }, { 0x0d, 0x0048, "Width" }, { 0x0d, 0x0049, "Height" }, { 0x0d, 0x0051, "ContactIdentifier" }, { 0x0d, 0x0052, "DeviceMode" }, { 0x0d, 0x0053, "DeviceIdentifier" }, { 0x0d, 0x0054, "ContactCount" }, { 0x0d, 0x0055, "ContactCountMaximum" }, { 0x0d, 0x0056, "ScanTime" }, { 0x0d, 0x0057, "SurfaceSwitch" }, { 0x0d, 0x0058, "ButtonSwitch" }, { 0x0d, 0x0059, "PadType" }, { 0x0d, 0x005a, "SecondaryBarrelSwitch" }, { 0x0d, 0x005b, "TransducerSerialNumber" }, { 0x0d, 0x005c, "PreferredColor" }, { 0x0d, 0x005d, "PreferredColorisLocked" }, { 0x0d, 0x005e, "PreferredLineWidth" }, { 0x0d, 0x005f, "PreferredLineWidthisLocked" }, { 0x0d, 0x0060, "LatencyMode" }, { 0x0d, 0x0061, "GestureCharacterQuality" }, { 0x0d, 0x0062, "CharacterGestureDataLength" }, { 0x0d, 0x0063, "CharacterGestureData" }, { 0x0d, 0x0064, "GestureCharacterEncoding" }, { 0x0d, 0x0065, "UTF8CharacterGestureEncoding" }, { 0x0d, 0x0066, "UTF16LittleEndianCharacterGestureEncoding" }, { 0x0d, 0x0067, "UTF16BigEndianCharacterGestureEncoding" }, { 0x0d, 0x0068, "UTF32LittleEndianCharacterGestureEncoding" }, { 0x0d, 0x0069, "UTF32BigEndianCharacterGestureEncoding" }, { 0x0d, 0x006a, "CapacitiveHeatMapProtocolVendorID" }, { 0x0d, 0x006b, "CapacitiveHeatMapProtocolVersion" }, { 0x0d, 0x006c, "CapacitiveHeatMapFrameData" }, { 0x0d, 0x006d, "GestureCharacterEnable" }, { 0x0d, 0x006e, "TransducerSerialNumberPart2" }, { 0x0d, 0x006f, "NoPreferredColor" }, { 0x0d, 0x0070, "PreferredLineStyle" }, { 0x0d, 0x0071, "PreferredLineStyleisLocked" }, { 0x0d, 0x0072, "Ink" }, { 0x0d, 0x0073, "Pencil" }, { 0x0d, 0x0074, "Highlighter" }, { 0x0d, 0x0075, "ChiselMarker" }, { 0x0d, 0x0076, "Brush" }, { 0x0d, 0x0077, "NoPreference" }, { 0x0d, 0x0080, "DigitizerDiagnostic" }, { 0x0d, 0x0081, "DigitizerError" }, { 0x0d, 0x0082, "ErrNormalStatus" }, { 0x0d, 0x0083, "ErrTransducersExceeded" }, { 0x0d, 0x0084, "ErrFullTransFeaturesUnavailable" }, { 0x0d, 0x0085, "ErrChargeLow" }, { 0x0d, 0x0090, "TransducerSoftwareInfo" }, { 0x0d, 0x0091, "TransducerVendorId" }, { 0x0d, 0x0092, "TransducerProductId" }, { 0x0d, 0x0093, "DeviceSupportedProtocols" }, { 0x0d, 0x0094, "TransducerSupportedProtocols" }, { 0x0d, 0x0095, "NoProtocol" }, { 0x0d, 0x0096, "WacomAESProtocol" }, { 0x0d, 0x0097, "USIProtocol" }, { 0x0d, 0x0098, "MicrosoftPenProtocol" }, { 0x0d, 0x00a0, "SupportedReportRates" }, { 0x0d, 0x00a1, "ReportRate" }, { 0x0d, 0x00a2, "TransducerConnected" }, { 0x0d, 0x00a3, "SwitchDisabled" }, { 0x0d, 0x00a4, "SwitchUnimplemented" }, { 0x0d, 0x00a5, "TransducerSwitches" }, { 0x0d, 0x00a6, "TransducerIndexSelector" }, { 0x0d, 0x00b0, "ButtonPressThreshold" }, { 0x0e, 0, "Haptics" }, { 0x0e, 0x0001, "SimpleHapticController" }, { 0x0e, 0x0010, "WaveformList" }, { 0x0e, 0x0011, "DurationList" }, { 0x0e, 0x0020, "AutoTrigger" }, { 0x0e, 0x0021, "ManualTrigger" }, { 0x0e, 0x0022, "AutoTriggerAssociatedControl" }, { 0x0e, 0x0023, "Intensity" }, { 0x0e, 0x0024, "RepeatCount" }, { 0x0e, 0x0025, "RetriggerPeriod" }, { 0x0e, 0x0026, "WaveformVendorPage" }, { 0x0e, 0x0027, "WaveformVendorID" }, { 0x0e, 0x0028, "WaveformCutoffTime" 
}, { 0x0e, 0x1001, "WaveformNone" }, { 0x0e, 0x1002, "WaveformStop" }, { 0x0e, 0x1003, "WaveformClick" }, { 0x0e, 0x1004, "WaveformBuzzContinuous" }, { 0x0e, 0x1005, "WaveformRumbleContinuous" }, { 0x0e, 0x1006, "WaveformPress" }, { 0x0e, 0x1007, "WaveformRelease" }, { 0x0e, 0x1008, "WaveformHover" }, { 0x0e, 0x1009, "WaveformSuccess" }, { 0x0e, 0x100a, "WaveformError" }, { 0x0e, 0x100b, "WaveformInkContinuous" }, { 0x0e, 0x100c, "WaveformPencilContinuous" }, { 0x0e, 0x100d, "WaveformMarkerContinuous" }, { 0x0e, 0x100e, "WaveformChiselMarkerContinuous" }, { 0x0e, 0x100f, "WaveformBrushContinuous" }, { 0x0e, 0x1010, "WaveformEraserContinuous" }, { 0x0e, 0x1011, "WaveformSparkleContinuous" }, { 0x0f, 0, "PhysicalInputDevice" }, { 0x0f, 0x0001, "PhysicalInputDevice" }, { 0x0f, 0x0020, "Normal" }, { 0x0f, 0x0021, "SetEffectReport" }, { 0x0f, 0x0022, "EffectParameterBlockIndex" }, { 0x0f, 0x0023, "ParameterBlockOffset" }, { 0x0f, 0x0024, "ROMFlag" }, { 0x0f, 0x0025, "EffectType" }, { 0x0f, 0x0026, "ETConstantForce" }, { 0x0f, 0x0027, "ETRamp" }, { 0x0f, 0x0028, "ETCustomForce" }, { 0x0f, 0x0030, "ETSquare" }, { 0x0f, 0x0031, "ETSine" }, { 0x0f, 0x0032, "ETTriangle" }, { 0x0f, 0x0033, "ETSawtoothUp" }, { 0x0f, 0x0034, "ETSawtoothDown" }, { 0x0f, 0x0040, "ETSpring" }, { 0x0f, 0x0041, "ETDamper" }, { 0x0f, 0x0042, "ETInertia" }, { 0x0f, 0x0043, "ETFriction" }, { 0x0f, 0x0050, "Duration" }, { 0x0f, 0x0051, "SamplePeriod" }, { 0x0f, 0x0052, "Gain" }, { 0x0f, 0x0053, "TriggerButton" }, { 0x0f, 0x0054, "TriggerRepeatInterval" }, { 0x0f, 0x0055, "AxesEnable" }, { 0x0f, 0x0056, "DirectionEnable" }, { 0x0f, 0x0057, "Direction" }, { 0x0f, 0x0058, "TypeSpecificBlockOffset" }, { 0x0f, 0x0059, "BlockType" }, { 0x0f, 0x005a, "SetEnvelopeReport" }, { 0x0f, 0x005b, "AttackLevel" }, { 0x0f, 0x005c, "AttackTime" }, { 0x0f, 0x005d, "FadeLevel" }, { 0x0f, 0x005e, "FadeTime" }, { 0x0f, 0x005f, "SetConditionReport" }, { 0x0f, 0x0060, "CenterPointOffset" }, { 0x0f, 0x0061, "PositiveCoefficient" }, { 0x0f, 0x0062, "NegativeCoefficient" }, { 0x0f, 0x0063, "PositiveSaturation" }, { 0x0f, 0x0064, "NegativeSaturation" }, { 0x0f, 0x0065, "DeadBand" }, { 0x0f, 0x0066, "DownloadForceSample" }, { 0x0f, 0x0067, "IsochCustomForceEnable" }, { 0x0f, 0x0068, "CustomForceDataReport" }, { 0x0f, 0x0069, "CustomForceData" }, { 0x0f, 0x006a, "CustomForceVendorDefinedData" }, { 0x0f, 0x006b, "SetCustomForceReport" }, { 0x0f, 0x006c, "CustomForceDataOffset" }, { 0x0f, 0x006d, "SampleCount" }, { 0x0f, 0x006e, "SetPeriodicReport" }, { 0x0f, 0x006f, "Offset" }, { 0x0f, 0x0070, "Magnitude" }, { 0x0f, 0x0071, "Phase" }, { 0x0f, 0x0072, "Period" }, { 0x0f, 0x0073, "SetConstantForceReport" }, { 0x0f, 0x0074, "SetRampForceReport" }, { 0x0f, 0x0075, "RampStart" }, { 0x0f, 0x0076, "RampEnd" }, { 0x0f, 0x0077, "EffectOperationReport" }, { 0x0f, 0x0078, "EffectOperation" }, { 0x0f, 0x0079, "OpEffectStart" }, { 0x0f, 0x007a, "OpEffectStartSolo" }, { 0x0f, 0x007b, "OpEffectStop" }, { 0x0f, 0x007c, "LoopCount" }, { 0x0f, 0x007d, "DeviceGainReport" }, { 0x0f, 0x007e, "DeviceGain" }, { 0x0f, 0x007f, "ParameterBlockPoolsReport" }, { 0x0f, 0x0080, "RAMPoolSize" }, { 0x0f, 0x0081, "ROMPoolSize" }, { 0x0f, 0x0082, "ROMEffectBlockCount" }, { 0x0f, 0x0083, "SimultaneousEffectsMax" }, { 0x0f, 0x0084, "PoolAlignment" }, { 0x0f, 0x0085, "ParameterBlockMoveReport" }, { 0x0f, 0x0086, "MoveSource" }, { 0x0f, 0x0087, "MoveDestination" }, { 0x0f, 0x0088, "MoveLength" }, { 0x0f, 0x0089, "EffectParameterBlockLoadReport" }, { 0x0f, 0x008b, "EffectParameterBlockLoadStatus" 
}, { 0x0f, 0x008c, "BlockLoadSuccess" }, { 0x0f, 0x008d, "BlockLoadFull" }, { 0x0f, 0x008e, "BlockLoadError" }, { 0x0f, 0x008f, "BlockHandle" }, { 0x0f, 0x0090, "EffectParameterBlockFreeReport" }, { 0x0f, 0x0091, "TypeSpecificBlockHandle" }, { 0x0f, 0x0092, "PIDStateReport" }, { 0x0f, 0x0094, "EffectPlaying" }, { 0x0f, 0x0095, "PIDDeviceControlReport" }, { 0x0f, 0x0096, "PIDDeviceControl" }, { 0x0f, 0x0097, "DCEnableActuators" }, { 0x0f, 0x0098, "DCDisableActuators" }, { 0x0f, 0x0099, "DCStopAllEffects" }, { 0x0f, 0x009a, "DCReset" }, { 0x0f, 0x009b, "DCPause" }, { 0x0f, 0x009c, "DCContinue" }, { 0x0f, 0x009f, "DevicePaused" }, { 0x0f, 0x00a0, "ActuatorsEnabled" }, { 0x0f, 0x00a4, "SafetySwitch" }, { 0x0f, 0x00a5, "ActuatorOverrideSwitch" }, { 0x0f, 0x00a6, "ActuatorPower" }, { 0x0f, 0x00a7, "StartDelay" }, { 0x0f, 0x00a8, "ParameterBlockSize" }, { 0x0f, 0x00a9, "DeviceManagedPool" }, { 0x0f, 0x00aa, "SharedParameterBlocks" }, { 0x0f, 0x00ab, "CreateNewEffectParameterBlockReport" }, { 0x0f, 0x00ac, "RAMPoolAvailable" }, { 0x11, 0, "SoC" }, { 0x11, 0x0001, "SocControl" }, { 0x11, 0x0002, "FirmwareTransfer" }, { 0x11, 0x0003, "FirmwareFileId" }, { 0x11, 0x0004, "FileOffsetInBytes" }, { 0x11, 0x0005, "FileTransferSizeMaxInBytes" }, { 0x11, 0x0006, "FilePayload" }, { 0x11, 0x0007, "FilePayloadSizeInBytes" }, { 0x11, 0x0008, "FilePayloadContainsLastBytes" }, { 0x11, 0x0009, "FileTransferStop" }, { 0x11, 0x000a, "FileTransferTillEnd" }, { 0x12, 0, "EyeandHeadTrackers" }, { 0x12, 0x0001, "EyeTracker" }, { 0x12, 0x0002, "HeadTracker" }, { 0x12, 0x0010, "TrackingData" }, { 0x12, 0x0011, "Capabilities" }, { 0x12, 0x0012, "Configuration" }, { 0x12, 0x0013, "Status" }, { 0x12, 0x0014, "Control" }, { 0x12, 0x0020, "SensorTimestamp" }, { 0x12, 0x0021, "PositionX" }, { 0x12, 0x0022, "PositionY" }, { 0x12, 0x0023, "PositionZ" }, { 0x12, 0x0024, "GazePoint" }, { 0x12, 0x0025, "LeftEyePosition" }, { 0x12, 0x0026, "RightEyePosition" }, { 0x12, 0x0027, "HeadPosition" }, { 0x12, 0x0028, "HeadDirectionPoint" }, { 0x12, 0x0029, "RotationaboutXaxis" }, { 0x12, 0x002a, "RotationaboutYaxis" }, { 0x12, 0x002b, "RotationaboutZaxis" }, { 0x12, 0x0100, "TrackerQuality" }, { 0x12, 0x0101, "MinimumTrackingDistance" }, { 0x12, 0x0102, "OptimumTrackingDistance" }, { 0x12, 0x0103, "MaximumTrackingDistance" }, { 0x12, 0x0104, "MaximumScreenPlaneWidth" }, { 0x12, 0x0105, "MaximumScreenPlaneHeight" }, { 0x12, 0x0200, "DisplayManufacturerID" }, { 0x12, 0x0201, "DisplayProductID" }, { 0x12, 0x0202, "DisplaySerialNumber" }, { 0x12, 0x0203, "DisplayManufacturerDate" }, { 0x12, 0x0204, "CalibratedScreenWidth" }, { 0x12, 0x0205, "CalibratedScreenHeight" }, { 0x12, 0x0300, "SamplingFrequency" }, { 0x12, 0x0301, "ConfigurationStatus" }, { 0x12, 0x0400, "DeviceModeRequest" }, { 0x14, 0, "AuxiliaryDisplay" }, { 0x14, 0x0001, "AlphanumericDisplay" }, { 0x14, 0x0002, "AuxiliaryDisplay" }, { 0x14, 0x0020, "DisplayAttributesReport" }, { 0x14, 0x0021, "ASCIICharacterSet" }, { 0x14, 0x0022, "DataReadBack" }, { 0x14, 0x0023, "FontReadBack" }, { 0x14, 0x0024, "DisplayControlReport" }, { 0x14, 0x0025, "ClearDisplay" }, { 0x14, 0x0026, "DisplayEnable" }, { 0x14, 0x0027, "ScreenSaverDelay" }, { 0x14, 0x0028, "ScreenSaverEnable" }, { 0x14, 0x0029, "VerticalScroll" }, { 0x14, 0x002a, "HorizontalScroll" }, { 0x14, 0x002b, "CharacterReport" }, { 0x14, 0x002c, "DisplayData" }, { 0x14, 0x002d, "DisplayStatus" }, { 0x14, 0x002e, "StatNotReady" }, { 0x14, 0x002f, "StatReady" }, { 0x14, 0x0030, "ErrNotaloadablecharacter" }, { 0x14, 0x0031, 
"ErrFontdatacannotberead" }, { 0x14, 0x0032, "CursorPositionReport" }, { 0x14, 0x0033, "Row" }, { 0x14, 0x0034, "Column" }, { 0x14, 0x0035, "Rows" }, { 0x14, 0x0036, "Columns" }, { 0x14, 0x0037, "CursorPixelPositioning" }, { 0x14, 0x0038, "CursorMode" }, { 0x14, 0x0039, "CursorEnable" }, { 0x14, 0x003a, "CursorBlink" }, { 0x14, 0x003b, "FontReport" }, { 0x14, 0x003c, "FontData" }, { 0x14, 0x003d, "CharacterWidth" }, { 0x14, 0x003e, "CharacterHeight" }, { 0x14, 0x003f, "CharacterSpacingHorizontal" }, { 0x14, 0x0040, "CharacterSpacingVertical" }, { 0x14, 0x0041, "UnicodeCharacterSet" }, { 0x14, 0x0042, "Font7Segment" }, { 0x14, 0x0043, "7SegmentDirectMap" }, { 0x14, 0x0044, "Font14Segment" }, { 0x14, 0x0045, "14SegmentDirectMap" }, { 0x14, 0x0046, "DisplayBrightness" }, { 0x14, 0x0047, "DisplayContrast" }, { 0x14, 0x0048, "CharacterAttribute" }, { 0x14, 0x0049, "AttributeReadback" }, { 0x14, 0x004a, "AttributeData" }, { 0x14, 0x004b, "CharAttrEnhance" }, { 0x14, 0x004c, "CharAttrUnderline" }, { 0x14, 0x004d, "CharAttrBlink" }, { 0x14, 0x0080, "BitmapSizeX" }, { 0x14, 0x0081, "BitmapSizeY" }, { 0x14, 0x0082, "MaxBlitSize" }, { 0x14, 0x0083, "BitDepthFormat" }, { 0x14, 0x0084, "DisplayOrientation" }, { 0x14, 0x0085, "PaletteReport" }, { 0x14, 0x0086, "PaletteDataSize" }, { 0x14, 0x0087, "PaletteDataOffset" }, { 0x14, 0x0088, "PaletteData" }, { 0x14, 0x008a, "BlitReport" }, { 0x14, 0x008b, "BlitRectangleX1" }, { 0x14, 0x008c, "BlitRectangleY1" }, { 0x14, 0x008d, "BlitRectangleX2" }, { 0x14, 0x008e, "BlitRectangleY2" }, { 0x14, 0x008f, "BlitData" }, { 0x14, 0x0090, "SoftButton" }, { 0x14, 0x0091, "SoftButtonID" }, { 0x14, 0x0092, "SoftButtonSide" }, { 0x14, 0x0093, "SoftButtonOffset1" }, { 0x14, 0x0094, "SoftButtonOffset2" }, { 0x14, 0x0095, "SoftButtonReport" }, { 0x14, 0x00c2, "SoftKeys" }, { 0x14, 0x00cc, "DisplayDataExtensions" }, { 0x14, 0x00cf, "CharacterMapping" }, { 0x14, 0x00dd, "UnicodeEquivalent" }, { 0x14, 0x00df, "CharacterPageMapping" }, { 0x14, 0x00ff, "RequestReport" }, { 0x20, 0, "Sensors" }, { 0x20, 0x0001, "Sensor" }, { 0x20, 0x0010, "Biometric" }, { 0x20, 0x0011, "BiometricHumanPresence" }, { 0x20, 0x0012, "BiometricHumanProximity" }, { 0x20, 0x0013, "BiometricHumanTouch" }, { 0x20, 0x0014, "BiometricBloodPressure" }, { 0x20, 0x0015, "BiometricBodyTemperature" }, { 0x20, 0x0016, "BiometricHeartRate" }, { 0x20, 0x0017, "BiometricHeartRateVariability" }, { 0x20, 0x0018, "BiometricPeripheralOxygenSaturation" }, { 0x20, 0x0019, "BiometricRespiratoryRate" }, { 0x20, 0x0020, "Electrical" }, { 0x20, 0x0021, "ElectricalCapacitance" }, { 0x20, 0x0022, "ElectricalCurrent" }, { 0x20, 0x0023, "ElectricalPower" }, { 0x20, 0x0024, "ElectricalInductance" }, { 0x20, 0x0025, "ElectricalResistance" }, { 0x20, 0x0026, "ElectricalVoltage" }, { 0x20, 0x0027, "ElectricalPotentiometer" }, { 0x20, 0x0028, "ElectricalFrequency" }, { 0x20, 0x0029, "ElectricalPeriod" }, { 0x20, 0x0030, "Environmental" }, { 0x20, 0x0031, "EnvironmentalAtmosphericPressure" }, { 0x20, 0x0032, "EnvironmentalHumidity" }, { 0x20, 0x0033, "EnvironmentalTemperature" }, { 0x20, 0x0034, "EnvironmentalWindDirection" }, { 0x20, 0x0035, "EnvironmentalWindSpeed" }, { 0x20, 0x0036, "EnvironmentalAirQuality" }, { 0x20, 0x0037, "EnvironmentalHeatIndex" }, { 0x20, 0x0038, "EnvironmentalSurfaceTemperature" }, { 0x20, 0x0039, "EnvironmentalVolatileOrganicCompounds" }, { 0x20, 0x003a, "EnvironmentalObjectPresence" }, { 0x20, 0x003b, "EnvironmentalObjectProximity" }, { 0x20, 0x0040, "Light" }, { 0x20, 0x0041, "LightAmbientLight" }, { 0x20, 
0x0042, "LightConsumerInfrared" }, { 0x20, 0x0043, "LightInfraredLight" }, { 0x20, 0x0044, "LightVisibleLight" }, { 0x20, 0x0045, "LightUltravioletLight" }, { 0x20, 0x0050, "Location" }, { 0x20, 0x0051, "LocationBroadcast" }, { 0x20, 0x0052, "LocationDeadReckoning" }, { 0x20, 0x0053, "LocationGPSGlobalPositioningSystem" }, { 0x20, 0x0054, "LocationLookup" }, { 0x20, 0x0055, "LocationOther" }, { 0x20, 0x0056, "LocationStatic" }, { 0x20, 0x0057, "LocationTriangulation" }, { 0x20, 0x0060, "Mechanical" }, { 0x20, 0x0061, "MechanicalBooleanSwitch" }, { 0x20, 0x0062, "MechanicalBooleanSwitchArray" }, { 0x20, 0x0063, "MechanicalMultivalueSwitch" }, { 0x20, 0x0064, "MechanicalForce" }, { 0x20, 0x0065, "MechanicalPressure" }, { 0x20, 0x0066, "MechanicalStrain" }, { 0x20, 0x0067, "MechanicalWeight" }, { 0x20, 0x0068, "MechanicalHapticVibrator" }, { 0x20, 0x0069, "MechanicalHallEffectSwitch" }, { 0x20, 0x0070, "Motion" }, { 0x20, 0x0071, "MotionAccelerometer1D" }, { 0x20, 0x0072, "MotionAccelerometer2D" }, { 0x20, 0x0073, "MotionAccelerometer3D" }, { 0x20, 0x0074, "MotionGyrometer1D" }, { 0x20, 0x0075, "MotionGyrometer2D" }, { 0x20, 0x0076, "MotionGyrometer3D" }, { 0x20, 0x0077, "MotionMotionDetector" }, { 0x20, 0x0078, "MotionSpeedometer" }, { 0x20, 0x0079, "MotionAccelerometer" }, { 0x20, 0x007a, "MotionGyrometer" }, { 0x20, 0x007b, "MotionGravityVector" }, { 0x20, 0x007c, "MotionLinearAccelerometer" }, { 0x20, 0x0080, "Orientation" }, { 0x20, 0x0081, "OrientationCompass1D" }, { 0x20, 0x0082, "OrientationCompass2D" }, { 0x20, 0x0083, "OrientationCompass3D" }, { 0x20, 0x0084, "OrientationInclinometer1D" }, { 0x20, 0x0085, "OrientationInclinometer2D" }, { 0x20, 0x0086, "OrientationInclinometer3D" }, { 0x20, 0x0087, "OrientationDistance1D" }, { 0x20, 0x0088, "OrientationDistance2D" }, { 0x20, 0x0089, "OrientationDistance3D" }, { 0x20, 0x008a, "OrientationDeviceOrientation" }, { 0x20, 0x008b, "OrientationCompass" }, { 0x20, 0x008c, "OrientationInclinometer" }, { 0x20, 0x008d, "OrientationDistance" }, { 0x20, 0x008e, "OrientationRelativeOrientation" }, { 0x20, 0x008f, "OrientationSimpleOrientation" }, { 0x20, 0x0090, "Scanner" }, { 0x20, 0x0091, "ScannerBarcode" }, { 0x20, 0x0092, "ScannerRFID" }, { 0x20, 0x0093, "ScannerNFC" }, { 0x20, 0x00a0, "Time" }, { 0x20, 0x00a1, "TimeAlarmTimer" }, { 0x20, 0x00a2, "TimeRealTimeClock" }, { 0x20, 0x00b0, "PersonalActivity" }, { 0x20, 0x00b1, "PersonalActivityActivityDetection" }, { 0x20, 0x00b2, "PersonalActivityDevicePosition" }, { 0x20, 0x00b3, "PersonalActivityFloorTracker" }, { 0x20, 0x00b4, "PersonalActivityPedometer" }, { 0x20, 0x00b5, "PersonalActivityStepDetection" }, { 0x20, 0x00c0, "OrientationExtended" }, { 0x20, 0x00c1, "OrientationExtendedGeomagneticOrientation" }, { 0x20, 0x00c2, "OrientationExtendedMagnetometer" }, { 0x20, 0x00d0, "Gesture" }, { 0x20, 0x00d1, "GestureChassisFlipGesture" }, { 0x20, 0x00d2, "GestureHingeFoldGesture" }, { 0x20, 0x00e0, "Other" }, { 0x20, 0x00e1, "OtherCustom" }, { 0x20, 0x00e2, "OtherGeneric" }, { 0x20, 0x00e3, "OtherGenericEnumerator" }, { 0x20, 0x00e4, "OtherHingeAngle" }, { 0x20, 0x00f0, "VendorReserved1" }, { 0x20, 0x00f1, "VendorReserved2" }, { 0x20, 0x00f2, "VendorReserved3" }, { 0x20, 0x00f3, "VendorReserved4" }, { 0x20, 0x00f4, "VendorReserved5" }, { 0x20, 0x00f5, "VendorReserved6" }, { 0x20, 0x00f6, "VendorReserved7" }, { 0x20, 0x00f7, "VendorReserved8" }, { 0x20, 0x00f8, "VendorReserved9" }, { 0x20, 0x00f9, "VendorReserved10" }, { 0x20, 0x00fa, "VendorReserved11" }, { 0x20, 0x00fb, "VendorReserved12" }, { 
0x20, 0x00fc, "VendorReserved13" }, { 0x20, 0x00fd, "VendorReserved14" }, { 0x20, 0x00fe, "VendorReserved15" }, { 0x20, 0x00ff, "VendorReserved16" }, { 0x20, 0x0200, "Event" }, { 0x20, 0x0201, "EventSensorState" }, { 0x20, 0x0202, "EventSensorEvent" }, { 0x20, 0x0300, "Property" }, { 0x20, 0x0301, "PropertyFriendlyName" }, { 0x20, 0x0302, "PropertyPersistentUniqueID" }, { 0x20, 0x0303, "PropertySensorStatus" }, { 0x20, 0x0304, "PropertyMinimumReportInterval" }, { 0x20, 0x0305, "PropertySensorManufacturer" }, { 0x20, 0x0306, "PropertySensorModel" }, { 0x20, 0x0307, "PropertySensorSerialNumber" }, { 0x20, 0x0308, "PropertySensorDescription" }, { 0x20, 0x0309, "PropertySensorConnectionType" }, { 0x20, 0x030a, "PropertySensorDevicePath" }, { 0x20, 0x030b, "PropertyHardwareRevision" }, { 0x20, 0x030c, "PropertyFirmwareVersion" }, { 0x20, 0x030d, "PropertyReleaseDate" }, { 0x20, 0x030e, "PropertyReportInterval" }, { 0x20, 0x030f, "PropertyChangeSensitivityAbsolute" }, { 0x20, 0x0310, "PropertyChangeSensitivityPercentofRange" }, { 0x20, 0x0311, "PropertyChangeSensitivityPercentRelative" }, { 0x20, 0x0312, "PropertyAccuracy" }, { 0x20, 0x0313, "PropertyResolution" }, { 0x20, 0x0314, "PropertyMaximum" }, { 0x20, 0x0315, "PropertyMinimum" }, { 0x20, 0x0316, "PropertyReportingState" }, { 0x20, 0x0317, "PropertySamplingRate" }, { 0x20, 0x0318, "PropertyResponseCurve" }, { 0x20, 0x0319, "PropertyPowerState" }, { 0x20, 0x031a, "PropertyMaximumFIFOEvents" }, { 0x20, 0x031b, "PropertyReportLatency" }, { 0x20, 0x031c, "PropertyFlushFIFOEvents" }, { 0x20, 0x031d, "PropertyMaximumPowerConsumption" }, { 0x20, 0x031e, "PropertyIsPrimary" }, { 0x20, 0x031f, "PropertyHumanPresenceDetectionType" }, { 0x20, 0x0400, "DataFieldLocation" }, { 0x20, 0x0402, "DataFieldAltitudeAntennaSeaLevel" }, { 0x20, 0x0403, "DataFieldDifferentialReferenceStationID" }, { 0x20, 0x0404, "DataFieldAltitudeEllipsoidError" }, { 0x20, 0x0405, "DataFieldAltitudeEllipsoid" }, { 0x20, 0x0406, "DataFieldAltitudeSeaLevelError" }, { 0x20, 0x0407, "DataFieldAltitudeSeaLevel" }, { 0x20, 0x0408, "DataFieldDifferentialGPSDataAge" }, { 0x20, 0x0409, "DataFieldErrorRadius" }, { 0x20, 0x040a, "DataFieldFixQuality" }, { 0x20, 0x040b, "DataFieldFixType" }, { 0x20, 0x040c, "DataFieldGeoidalSeparation" }, { 0x20, 0x040d, "DataFieldGPSOperationMode" }, { 0x20, 0x040e, "DataFieldGPSSelectionMode" }, { 0x20, 0x040f, "DataFieldGPSStatus" }, { 0x20, 0x0410, "DataFieldPositionDilutionofPrecision" }, { 0x20, 0x0411, "DataFieldHorizontalDilutionofPrecision" }, { 0x20, 0x0412, "DataFieldVerticalDilutionofPrecision" }, { 0x20, 0x0413, "DataFieldLatitude" }, { 0x20, 0x0414, "DataFieldLongitude" }, { 0x20, 0x0415, "DataFieldTrueHeading" }, { 0x20, 0x0416, "DataFieldMagneticHeading" }, { 0x20, 0x0417, "DataFieldMagneticVariation" }, { 0x20, 0x0418, "DataFieldSpeed" }, { 0x20, 0x0419, "DataFieldSatellitesinView" }, { 0x20, 0x041a, "DataFieldSatellitesinViewAzimuth" }, { 0x20, 0x041b, "DataFieldSatellitesinViewElevation" }, { 0x20, 0x041c, "DataFieldSatellitesinViewIDs" }, { 0x20, 0x041d, "DataFieldSatellitesinViewPRNs" }, { 0x20, 0x041e, "DataFieldSatellitesinViewSNRatios" }, { 0x20, 0x041f, "DataFieldSatellitesUsedCount" }, { 0x20, 0x0420, "DataFieldSatellitesUsedPRNs" }, { 0x20, 0x0421, "DataFieldNMEASentence" }, { 0x20, 0x0422, "DataFieldAddressLine1" }, { 0x20, 0x0423, "DataFieldAddressLine2" }, { 0x20, 0x0424, "DataFieldCity" }, { 0x20, 0x0425, "DataFieldStateorProvince" }, { 0x20, 0x0426, "DataFieldCountryorRegion" }, { 0x20, 0x0427, "DataFieldPostalCode" }, { 
0x20, 0x042a, "PropertyLocation" }, { 0x20, 0x042b, "PropertyLocationDesiredAccuracy" }, { 0x20, 0x0430, "DataFieldEnvironmental" }, { 0x20, 0x0431, "DataFieldAtmosphericPressure" }, { 0x20, 0x0433, "DataFieldRelativeHumidity" }, { 0x20, 0x0434, "DataFieldTemperature" }, { 0x20, 0x0435, "DataFieldWindDirection" }, { 0x20, 0x0436, "DataFieldWindSpeed" }, { 0x20, 0x0437, "DataFieldAirQualityIndex" }, { 0x20, 0x0438, "DataFieldEquivalentCO2" }, { 0x20, 0x0439, "DataFieldVolatileOrganicCompoundConcentration" }, { 0x20, 0x043a, "DataFieldObjectPresence" }, { 0x20, 0x043b, "DataFieldObjectProximityRange" }, { 0x20, 0x043c, "DataFieldObjectProximityOutofRange" }, { 0x20, 0x0440, "PropertyEnvironmental" }, { 0x20, 0x0441, "PropertyReferencePressure" }, { 0x20, 0x0450, "DataFieldMotion" }, { 0x20, 0x0451, "DataFieldMotionState" }, { 0x20, 0x0452, "DataFieldAcceleration" }, { 0x20, 0x0453, "DataFieldAccelerationAxisX" }, { 0x20, 0x0454, "DataFieldAccelerationAxisY" }, { 0x20, 0x0455, "DataFieldAccelerationAxisZ" }, { 0x20, 0x0456, "DataFieldAngularVelocity" }, { 0x20, 0x0457, "DataFieldAngularVelocityaboutXAxis" }, { 0x20, 0x0458, "DataFieldAngularVelocityaboutYAxis" }, { 0x20, 0x0459, "DataFieldAngularVelocityaboutZAxis" }, { 0x20, 0x045a, "DataFieldAngularPosition" }, { 0x20, 0x045b, "DataFieldAngularPositionaboutXAxis" }, { 0x20, 0x045c, "DataFieldAngularPositionaboutYAxis" }, { 0x20, 0x045d, "DataFieldAngularPositionaboutZAxis" }, { 0x20, 0x045e, "DataFieldMotionSpeed" }, { 0x20, 0x045f, "DataFieldMotionIntensity" }, { 0x20, 0x0470, "DataFieldOrientation" }, { 0x20, 0x0471, "DataFieldHeading" }, { 0x20, 0x0472, "DataFieldHeadingXAxis" }, { 0x20, 0x0473, "DataFieldHeadingYAxis" }, { 0x20, 0x0474, "DataFieldHeadingZAxis" }, { 0x20, 0x0475, "DataFieldHeadingCompensatedMagneticNorth" }, { 0x20, 0x0476, "DataFieldHeadingCompensatedTrueNorth" }, { 0x20, 0x0477, "DataFieldHeadingMagneticNorth" }, { 0x20, 0x0478, "DataFieldHeadingTrueNorth" }, { 0x20, 0x0479, "DataFieldDistance" }, { 0x20, 0x047a, "DataFieldDistanceXAxis" }, { 0x20, 0x047b, "DataFieldDistanceYAxis" }, { 0x20, 0x047c, "DataFieldDistanceZAxis" }, { 0x20, 0x047d, "DataFieldDistanceOutofRange" }, { 0x20, 0x047e, "DataFieldTilt" }, { 0x20, 0x047f, "DataFieldTiltXAxis" }, { 0x20, 0x0480, "DataFieldTiltYAxis" }, { 0x20, 0x0481, "DataFieldTiltZAxis" }, { 0x20, 0x0482, "DataFieldRotationMatrix" }, { 0x20, 0x0483, "DataFieldQuaternion" }, { 0x20, 0x0484, "DataFieldMagneticFlux" }, { 0x20, 0x0485, "DataFieldMagneticFluxXAxis" }, { 0x20, 0x0486, "DataFieldMagneticFluxYAxis" }, { 0x20, 0x0487, "DataFieldMagneticFluxZAxis" }, { 0x20, 0x0488, "DataFieldMagnetometerAccuracy" }, { 0x20, 0x0489, "DataFieldSimpleOrientationDirection" }, { 0x20, 0x0490, "DataFieldMechanical" }, { 0x20, 0x0491, "DataFieldBooleanSwitchState" }, { 0x20, 0x0492, "DataFieldBooleanSwitchArrayStates" }, { 0x20, 0x0493, "DataFieldMultivalueSwitchValue" }, { 0x20, 0x0494, "DataFieldForce" }, { 0x20, 0x0495, "DataFieldAbsolutePressure" }, { 0x20, 0x0496, "DataFieldGaugePressure" }, { 0x20, 0x0497, "DataFieldStrain" }, { 0x20, 0x0498, "DataFieldWeight" }, { 0x20, 0x04a0, "PropertyMechanical" }, { 0x20, 0x04a1, "PropertyVibrationState" }, { 0x20, 0x04a2, "PropertyForwardVibrationSpeed" }, { 0x20, 0x04a3, "PropertyBackwardVibrationSpeed" }, { 0x20, 0x04b0, "DataFieldBiometric" }, { 0x20, 0x04b1, "DataFieldHumanPresence" }, { 0x20, 0x04b2, "DataFieldHumanProximityRange" }, { 0x20, 0x04b3, "DataFieldHumanProximityOutofRange" }, { 0x20, 0x04b4, "DataFieldHumanTouchState" }, { 0x20, 
0x04b5, "DataFieldBloodPressure" }, { 0x20, 0x04b6, "DataFieldBloodPressureDiastolic" }, { 0x20, 0x04b7, "DataFieldBloodPressureSystolic" }, { 0x20, 0x04b8, "DataFieldHeartRate" }, { 0x20, 0x04b9, "DataFieldRestingHeartRate" }, { 0x20, 0x04ba, "DataFieldHeartbeatInterval" }, { 0x20, 0x04bb, "DataFieldRespiratoryRate" }, { 0x20, 0x04bc, "DataFieldSpO2" }, { 0x20, 0x04bd, "DataFieldHumanAttentionDetected" }, { 0x20, 0x04be, "DataFieldHumanHeadAzimuth" }, { 0x20, 0x04bf, "DataFieldHumanHeadAltitude" }, { 0x20, 0x04c0, "DataFieldHumanHeadRoll" }, { 0x20, 0x04c1, "DataFieldHumanHeadPitch" }, { 0x20, 0x04c2, "DataFieldHumanHeadYaw" }, { 0x20, 0x04c3, "DataFieldHumanCorrelationId" }, { 0x20, 0x04d0, "DataFieldLight" }, { 0x20, 0x04d1, "DataFieldIlluminance" }, { 0x20, 0x04d2, "DataFieldColorTemperature" }, { 0x20, 0x04d3, "DataFieldChromaticity" }, { 0x20, 0x04d4, "DataFieldChromaticityX" }, { 0x20, 0x04d5, "DataFieldChromaticityY" }, { 0x20, 0x04d6, "DataFieldConsumerIRSentenceReceive" }, { 0x20, 0x04d7, "DataFieldInfraredLight" }, { 0x20, 0x04d8, "DataFieldRedLight" }, { 0x20, 0x04d9, "DataFieldGreenLight" }, { 0x20, 0x04da, "DataFieldBlueLight" }, { 0x20, 0x04db, "DataFieldUltravioletALight" }, { 0x20, 0x04dc, "DataFieldUltravioletBLight" }, { 0x20, 0x04dd, "DataFieldUltravioletIndex" }, { 0x20, 0x04de, "DataFieldNearInfraredLight" }, { 0x20, 0x04df, "PropertyLight" }, { 0x20, 0x04e0, "PropertyConsumerIRSentenceSend" }, { 0x20, 0x04e2, "PropertyAutoBrightnessPreferred" }, { 0x20, 0x04e3, "PropertyAutoColorPreferred" }, { 0x20, 0x04f0, "DataFieldScanner" }, { 0x20, 0x04f1, "DataFieldRFIDTag40Bit" }, { 0x20, 0x04f2, "DataFieldNFCSentenceReceive" }, { 0x20, 0x04f8, "PropertyScanner" }, { 0x20, 0x04f9, "PropertyNFCSentenceSend" }, { 0x20, 0x0500, "DataFieldElectrical" }, { 0x20, 0x0501, "DataFieldCapacitance" }, { 0x20, 0x0502, "DataFieldCurrent" }, { 0x20, 0x0503, "DataFieldElectricalPower" }, { 0x20, 0x0504, "DataFieldInductance" }, { 0x20, 0x0505, "DataFieldResistance" }, { 0x20, 0x0506, "DataFieldVoltage" }, { 0x20, 0x0507, "DataFieldFrequency" }, { 0x20, 0x0508, "DataFieldPeriod" }, { 0x20, 0x0509, "DataFieldPercentofRange" }, { 0x20, 0x0520, "DataFieldTime" }, { 0x20, 0x0521, "DataFieldYear" }, { 0x20, 0x0522, "DataFieldMonth" }, { 0x20, 0x0523, "DataFieldDay" }, { 0x20, 0x0524, "DataFieldDayofWeek" }, { 0x20, 0x0525, "DataFieldHour" }, { 0x20, 0x0526, "DataFieldMinute" }, { 0x20, 0x0527, "DataFieldSecond" }, { 0x20, 0x0528, "DataFieldMillisecond" }, { 0x20, 0x0529, "DataFieldTimestamp" }, { 0x20, 0x052a, "DataFieldJulianDayofYear" }, { 0x20, 0x052b, "DataFieldTimeSinceSystemBoot" }, { 0x20, 0x0530, "PropertyTime" }, { 0x20, 0x0531, "PropertyTimeZoneOffsetfromUTC" }, { 0x20, 0x0532, "PropertyTimeZoneName" }, { 0x20, 0x0533, "PropertyDaylightSavingsTimeObserved" }, { 0x20, 0x0534, "PropertyTimeTrimAdjustment" }, { 0x20, 0x0535, "PropertyArmAlarm" }, { 0x20, 0x0540, "DataFieldCustom" }, { 0x20, 0x0541, "DataFieldCustomUsage" }, { 0x20, 0x0542, "DataFieldCustomBooleanArray" }, { 0x20, 0x0543, "DataFieldCustomValue" }, { 0x20, 0x0544, "DataFieldCustomValue1" }, { 0x20, 0x0545, "DataFieldCustomValue2" }, { 0x20, 0x0546, "DataFieldCustomValue3" }, { 0x20, 0x0547, "DataFieldCustomValue4" }, { 0x20, 0x0548, "DataFieldCustomValue5" }, { 0x20, 0x0549, "DataFieldCustomValue6" }, { 0x20, 0x054a, "DataFieldCustomValue7" }, { 0x20, 0x054b, "DataFieldCustomValue8" }, { 0x20, 0x054c, "DataFieldCustomValue9" }, { 0x20, 0x054d, "DataFieldCustomValue10" }, { 0x20, 0x054e, "DataFieldCustomValue11" }, { 0x20, 
0x054f, "DataFieldCustomValue12" }, { 0x20, 0x0550, "DataFieldCustomValue13" }, { 0x20, 0x0551, "DataFieldCustomValue14" }, { 0x20, 0x0552, "DataFieldCustomValue15" }, { 0x20, 0x0553, "DataFieldCustomValue16" }, { 0x20, 0x0554, "DataFieldCustomValue17" }, { 0x20, 0x0555, "DataFieldCustomValue18" }, { 0x20, 0x0556, "DataFieldCustomValue19" }, { 0x20, 0x0557, "DataFieldCustomValue20" }, { 0x20, 0x0558, "DataFieldCustomValue21" }, { 0x20, 0x0559, "DataFieldCustomValue22" }, { 0x20, 0x055a, "DataFieldCustomValue23" }, { 0x20, 0x055b, "DataFieldCustomValue24" }, { 0x20, 0x055c, "DataFieldCustomValue25" }, { 0x20, 0x055d, "DataFieldCustomValue26" }, { 0x20, 0x055e, "DataFieldCustomValue27" }, { 0x20, 0x055f, "DataFieldCustomValue28" }, { 0x20, 0x0560, "DataFieldGeneric" }, { 0x20, 0x0561, "DataFieldGenericGUIDorPROPERTYKEY" }, { 0x20, 0x0562, "DataFieldGenericCategoryGUID" }, { 0x20, 0x0563, "DataFieldGenericTypeGUID" }, { 0x20, 0x0564, "DataFieldGenericEventPROPERTYKEY" }, { 0x20, 0x0565, "DataFieldGenericPropertyPROPERTYKEY" }, { 0x20, 0x0566, "DataFieldGenericDataFieldPROPERTYKEY" }, { 0x20, 0x0567, "DataFieldGenericEvent" }, { 0x20, 0x0568, "DataFieldGenericProperty" }, { 0x20, 0x0569, "DataFieldGenericDataField" }, { 0x20, 0x056a, "DataFieldEnumeratorTableRowIndex" }, { 0x20, 0x056b, "DataFieldEnumeratorTableRowCount" }, { 0x20, 0x056c, "DataFieldGenericGUIDorPROPERTYKEYkind" }, { 0x20, 0x056d, "DataFieldGenericGUID" }, { 0x20, 0x056e, "DataFieldGenericPROPERTYKEY" }, { 0x20, 0x056f, "DataFieldGenericTopLevelCollectionID" }, { 0x20, 0x0570, "DataFieldGenericReportID" }, { 0x20, 0x0571, "DataFieldGenericReportItemPositionIndex" }, { 0x20, 0x0572, "DataFieldGenericFirmwareVARTYPE" }, { 0x20, 0x0573, "DataFieldGenericUnitofMeasure" }, { 0x20, 0x0574, "DataFieldGenericUnitExponent" }, { 0x20, 0x0575, "DataFieldGenericReportSize" }, { 0x20, 0x0576, "DataFieldGenericReportCount" }, { 0x20, 0x0580, "PropertyGeneric" }, { 0x20, 0x0581, "PropertyEnumeratorTableRowIndex" }, { 0x20, 0x0582, "PropertyEnumeratorTableRowCount" }, { 0x20, 0x0590, "DataFieldPersonalActivity" }, { 0x20, 0x0591, "DataFieldActivityType" }, { 0x20, 0x0592, "DataFieldActivityState" }, { 0x20, 0x0593, "DataFieldDevicePosition" }, { 0x20, 0x0594, "DataFieldStepCount" }, { 0x20, 0x0595, "DataFieldStepCountReset" }, { 0x20, 0x0596, "DataFieldStepDuration" }, { 0x20, 0x0597, "DataFieldStepType" }, { 0x20, 0x05a0, "PropertyMinimumActivityDetectionInterval" }, { 0x20, 0x05a1, "PropertySupportedActivityTypes" }, { 0x20, 0x05a2, "PropertySubscribedActivityTypes" }, { 0x20, 0x05a3, "PropertySupportedStepTypes" }, { 0x20, 0x05a4, "PropertySubscribedStepTypes" }, { 0x20, 0x05a5, "PropertyFloorHeight" }, { 0x20, 0x05b0, "DataFieldCustomTypeID" }, { 0x20, 0x05c0, "PropertyCustom" }, { 0x20, 0x05c1, "PropertyCustomValue1" }, { 0x20, 0x05c2, "PropertyCustomValue2" }, { 0x20, 0x05c3, "PropertyCustomValue3" }, { 0x20, 0x05c4, "PropertyCustomValue4" }, { 0x20, 0x05c5, "PropertyCustomValue5" }, { 0x20, 0x05c6, "PropertyCustomValue6" }, { 0x20, 0x05c7, "PropertyCustomValue7" }, { 0x20, 0x05c8, "PropertyCustomValue8" }, { 0x20, 0x05c9, "PropertyCustomValue9" }, { 0x20, 0x05ca, "PropertyCustomValue10" }, { 0x20, 0x05cb, "PropertyCustomValue11" }, { 0x20, 0x05cc, "PropertyCustomValue12" }, { 0x20, 0x05cd, "PropertyCustomValue13" }, { 0x20, 0x05ce, "PropertyCustomValue14" }, { 0x20, 0x05cf, "PropertyCustomValue15" }, { 0x20, 0x05d0, "PropertyCustomValue16" }, { 0x20, 0x05e0, "DataFieldHinge" }, { 0x20, 0x05e1, "DataFieldHingeAngle" }, { 0x20, 0x05f0, 
"DataFieldGestureSensor" }, { 0x20, 0x05f1, "DataFieldGestureState" }, { 0x20, 0x05f2, "DataFieldHingeFoldInitialAngle" }, { 0x20, 0x05f3, "DataFieldHingeFoldFinalAngle" }, { 0x20, 0x05f4, "DataFieldHingeFoldContributingPanel" }, { 0x20, 0x05f5, "DataFieldHingeFoldType" }, { 0x20, 0x0800, "SensorStateUndefined" }, { 0x20, 0x0801, "SensorStateReady" }, { 0x20, 0x0802, "SensorStateNotAvailable" }, { 0x20, 0x0803, "SensorStateNoData" }, { 0x20, 0x0804, "SensorStateInitializing" }, { 0x20, 0x0805, "SensorStateAccessDenied" }, { 0x20, 0x0806, "SensorStateError" }, { 0x20, 0x0810, "SensorEventUnknown" }, { 0x20, 0x0811, "SensorEventStateChanged" }, { 0x20, 0x0812, "SensorEventPropertyChanged" }, { 0x20, 0x0813, "SensorEventDataUpdated" }, { 0x20, 0x0814, "SensorEventPollResponse" }, { 0x20, 0x0815, "SensorEventChangeSensitivity" }, { 0x20, 0x0816, "SensorEventRangeMaximumReached" }, { 0x20, 0x0817, "SensorEventRangeMinimumReached" }, { 0x20, 0x0818, "SensorEventHighThresholdCrossUpward" }, { 0x20, 0x0819, "SensorEventHighThresholdCrossDownward" }, { 0x20, 0x081a, "SensorEventLowThresholdCrossUpward" }, { 0x20, 0x081b, "SensorEventLowThresholdCrossDownward" }, { 0x20, 0x081c, "SensorEventZeroThresholdCrossUpward" }, { 0x20, 0x081d, "SensorEventZeroThresholdCrossDownward" }, { 0x20, 0x081e, "SensorEventPeriodExceeded" }, { 0x20, 0x081f, "SensorEventFrequencyExceeded" }, { 0x20, 0x0820, "SensorEventComplexTrigger" }, { 0x20, 0x0830, "ConnectionTypePCIntegrated" }, { 0x20, 0x0831, "ConnectionTypePCAttached" }, { 0x20, 0x0832, "ConnectionTypePCExternal" }, { 0x20, 0x0840, "ReportingStateReportNoEvents" }, { 0x20, 0x0841, "ReportingStateReportAllEvents" }, { 0x20, 0x0842, "ReportingStateReportThresholdEvents" }, { 0x20, 0x0843, "ReportingStateWakeOnNoEvents" }, { 0x20, 0x0844, "ReportingStateWakeOnAllEvents" }, { 0x20, 0x0845, "ReportingStateWakeOnThresholdEvents" }, { 0x20, 0x0846, "ReportingStateAnytime" }, { 0x20, 0x0850, "PowerStateUndefined" }, { 0x20, 0x0851, "PowerStateD0FullPower" }, { 0x20, 0x0852, "PowerStateD1LowPower" }, { 0x20, 0x0853, "PowerStateD2StandbyPowerwithWakeup" }, { 0x20, 0x0854, "PowerStateD3SleepwithWakeup" }, { 0x20, 0x0855, "PowerStateD4PowerOff" }, { 0x20, 0x0860, "AccuracyDefault" }, { 0x20, 0x0861, "AccuracyHigh" }, { 0x20, 0x0862, "AccuracyMedium" }, { 0x20, 0x0863, "AccuracyLow" }, { 0x20, 0x0870, "FixQualityNoFix" }, { 0x20, 0x0871, "FixQualityGPS" }, { 0x20, 0x0872, "FixQualityDGPS" }, { 0x20, 0x0880, "FixTypeNoFix" }, { 0x20, 0x0881, "FixTypeGPSSPSModeFixValid" }, { 0x20, 0x0882, "FixTypeDGPSSPSModeFixValid" }, { 0x20, 0x0883, "FixTypeGPSPPSModeFixValid" }, { 0x20, 0x0884, "FixTypeRealTimeKinematic" }, { 0x20, 0x0885, "FixTypeFloatRTK" }, { 0x20, 0x0886, "FixTypeEstimateddeadreckoned" }, { 0x20, 0x0887, "FixTypeManualInputMode" }, { 0x20, 0x0888, "FixTypeSimulatorMode" }, { 0x20, 0x0890, "GPSOperationModeManual" }, { 0x20, 0x0891, "GPSOperationModeAutomatic" }, { 0x20, 0x08a0, "GPSSelectionModeAutonomous" }, { 0x20, 0x08a1, "GPSSelectionModeDGPS" }, { 0x20, 0x08a2, "GPSSelectionModeEstimateddeadreckoned" }, { 0x20, 0x08a3, "GPSSelectionModeManualInput" }, { 0x20, 0x08a4, "GPSSelectionModeSimulator" }, { 0x20, 0x08a5, "GPSSelectionModeDataNotValid" }, { 0x20, 0x08b0, "GPSStatusDataValid" }, { 0x20, 0x08b1, "GPSStatusDataNotValid" }, { 0x20, 0x08c0, "DayofWeekSunday" }, { 0x20, 0x08c1, "DayofWeekMonday" }, { 0x20, 0x08c2, "DayofWeekTuesday" }, { 0x20, 0x08c3, "DayofWeekWednesday" }, { 0x20, 0x08c4, "DayofWeekThursday" }, { 0x20, 0x08c5, "DayofWeekFriday" }, { 0x20, 
0x08c6, "DayofWeekSaturday" }, { 0x20, 0x08d0, "KindCategory" }, { 0x20, 0x08d1, "KindType" }, { 0x20, 0x08d2, "KindEvent" }, { 0x20, 0x08d3, "KindProperty" }, { 0x20, 0x08d4, "KindDataField" }, { 0x20, 0x08e0, "MagnetometerAccuracyLow" }, { 0x20, 0x08e1, "MagnetometerAccuracyMedium" }, { 0x20, 0x08e2, "MagnetometerAccuracyHigh" }, { 0x20, 0x08f0, "SimpleOrientationDirectionNotRotated" }, { 0x20, 0x08f1, "SimpleOrientationDirectionRotated90DegreesCCW" }, { 0x20, 0x08f2, "SimpleOrientationDirectionRotated180DegreesCCW" }, { 0x20, 0x08f3, "SimpleOrientationDirectionRotated270DegreesCCW" }, { 0x20, 0x08f4, "SimpleOrientationDirectionFaceUp" }, { 0x20, 0x08f5, "SimpleOrientationDirectionFaceDown" }, { 0x20, 0x0900, "VT_NULL" }, { 0x20, 0x0901, "VT_BOOL" }, { 0x20, 0x0902, "VT_UI1" }, { 0x20, 0x0903, "VT_I1" }, { 0x20, 0x0904, "VT_UI2" }, { 0x20, 0x0905, "VT_I2" }, { 0x20, 0x0906, "VT_UI4" }, { 0x20, 0x0907, "VT_I4" }, { 0x20, 0x0908, "VT_UI8" }, { 0x20, 0x0909, "VT_I8" }, { 0x20, 0x090a, "VT_R4" }, { 0x20, 0x090b, "VT_R8" }, { 0x20, 0x090c, "VT_WSTR" }, { 0x20, 0x090d, "VT_STR" }, { 0x20, 0x090e, "VT_CLSID" }, { 0x20, 0x090f, "VT_VECTORVT_UI1" }, { 0x20, 0x0910, "VT_F16E0" }, { 0x20, 0x0911, "VT_F16E1" }, { 0x20, 0x0912, "VT_F16E2" }, { 0x20, 0x0913, "VT_F16E3" }, { 0x20, 0x0914, "VT_F16E4" }, { 0x20, 0x0915, "VT_F16E5" }, { 0x20, 0x0916, "VT_F16E6" }, { 0x20, 0x0917, "VT_F16E7" }, { 0x20, 0x0918, "VT_F16E8" }, { 0x20, 0x0919, "VT_F16E9" }, { 0x20, 0x091a, "VT_F16EA" }, { 0x20, 0x091b, "VT_F16EB" }, { 0x20, 0x091c, "VT_F16EC" }, { 0x20, 0x091d, "VT_F16ED" }, { 0x20, 0x091e, "VT_F16EE" }, { 0x20, 0x091f, "VT_F16EF" }, { 0x20, 0x0920, "VT_F32E0" }, { 0x20, 0x0921, "VT_F32E1" }, { 0x20, 0x0922, "VT_F32E2" }, { 0x20, 0x0923, "VT_F32E3" }, { 0x20, 0x0924, "VT_F32E4" }, { 0x20, 0x0925, "VT_F32E5" }, { 0x20, 0x0926, "VT_F32E6" }, { 0x20, 0x0927, "VT_F32E7" }, { 0x20, 0x0928, "VT_F32E8" }, { 0x20, 0x0929, "VT_F32E9" }, { 0x20, 0x092a, "VT_F32EA" }, { 0x20, 0x092b, "VT_F32EB" }, { 0x20, 0x092c, "VT_F32EC" }, { 0x20, 0x092d, "VT_F32ED" }, { 0x20, 0x092e, "VT_F32EE" }, { 0x20, 0x092f, "VT_F32EF" }, { 0x20, 0x0930, "ActivityTypeUnknown" }, { 0x20, 0x0931, "ActivityTypeStationary" }, { 0x20, 0x0932, "ActivityTypeFidgeting" }, { 0x20, 0x0933, "ActivityTypeWalking" }, { 0x20, 0x0934, "ActivityTypeRunning" }, { 0x20, 0x0935, "ActivityTypeInVehicle" }, { 0x20, 0x0936, "ActivityTypeBiking" }, { 0x20, 0x0937, "ActivityTypeIdle" }, { 0x20, 0x0940, "UnitNotSpecified" }, { 0x20, 0x0941, "UnitLux" }, { 0x20, 0x0942, "UnitDegreesKelvin" }, { 0x20, 0x0943, "UnitDegreesCelsius" }, { 0x20, 0x0944, "UnitPascal" }, { 0x20, 0x0945, "UnitNewton" }, { 0x20, 0x0946, "UnitMetersSecond" }, { 0x20, 0x0947, "UnitKilogram" }, { 0x20, 0x0948, "UnitMeter" }, { 0x20, 0x0949, "UnitMetersSecondSecond" }, { 0x20, 0x094a, "UnitFarad" }, { 0x20, 0x094b, "UnitAmpere" }, { 0x20, 0x094c, "UnitWatt" }, { 0x20, 0x094d, "UnitHenry" }, { 0x20, 0x094e, "UnitOhm" }, { 0x20, 0x094f, "UnitVolt" }, { 0x20, 0x0950, "UnitHertz" }, { 0x20, 0x0951, "UnitBar" }, { 0x20, 0x0952, "UnitDegreesAnticlockwise" }, { 0x20, 0x0953, "UnitDegreesClockwise" }, { 0x20, 0x0954, "UnitDegrees" }, { 0x20, 0x0955, "UnitDegreesSecond" }, { 0x20, 0x0956, "UnitDegreesSecondSecond" }, { 0x20, 0x0957, "UnitKnot" }, { 0x20, 0x0958, "UnitPercent" }, { 0x20, 0x0959, "UnitSecond" }, { 0x20, 0x095a, "UnitMillisecond" }, { 0x20, 0x095b, "UnitG" }, { 0x20, 0x095c, "UnitBytes" }, { 0x20, 0x095d, "UnitMilligauss" }, { 0x20, 0x095e, "UnitBits" }, { 0x20, 0x0960, 
"ActivityStateNoStateChange" }, { 0x20, 0x0961, "ActivityStateStartActivity" }, { 0x20, 0x0962, "ActivityStateEndActivity" }, { 0x20, 0x0970, "Exponent0" }, { 0x20, 0x0971, "Exponent1" }, { 0x20, 0x0972, "Exponent2" }, { 0x20, 0x0973, "Exponent3" }, { 0x20, 0x0974, "Exponent4" }, { 0x20, 0x0975, "Exponent5" }, { 0x20, 0x0976, "Exponent6" }, { 0x20, 0x0977, "Exponent7" }, { 0x20, 0x0978, "Exponent8" }, { 0x20, 0x0979, "Exponent9" }, { 0x20, 0x097a, "ExponentA" }, { 0x20, 0x097b, "ExponentB" }, { 0x20, 0x097c, "ExponentC" }, { 0x20, 0x097d, "ExponentD" }, { 0x20, 0x097e, "ExponentE" }, { 0x20, 0x097f, "ExponentF" }, { 0x20, 0x0980, "DevicePositionUnknown" }, { 0x20, 0x0981, "DevicePositionUnchanged" }, { 0x20, 0x0982, "DevicePositionOnDesk" }, { 0x20, 0x0983, "DevicePositionInHand" }, { 0x20, 0x0984, "DevicePositionMovinginBag" }, { 0x20, 0x0985, "DevicePositionStationaryinBag" }, { 0x20, 0x0990, "StepTypeUnknown" }, { 0x20, 0x0991, "StepTypeWalking" }, { 0x20, 0x0992, "StepTypeRunning" }, { 0x20, 0x09a0, "GestureStateUnknown" }, { 0x20, 0x09a1, "GestureStateStarted" }, { 0x20, 0x09a2, "GestureStateCompleted" }, { 0x20, 0x09a3, "GestureStateCancelled" }, { 0x20, 0x09b0, "HingeFoldContributingPanelUnknown" }, { 0x20, 0x09b1, "HingeFoldContributingPanelPanel1" }, { 0x20, 0x09b2, "HingeFoldContributingPanelPanel2" }, { 0x20, 0x09b3, "HingeFoldContributingPanelBoth" }, { 0x20, 0x09b4, "HingeFoldTypeUnknown" }, { 0x20, 0x09b5, "HingeFoldTypeIncreasing" }, { 0x20, 0x09b6, "HingeFoldTypeDecreasing" }, { 0x20, 0x09c0, "HumanPresenceDetectionTypeVendorDefinedNonBiometric" }, { 0x20, 0x09c1, "HumanPresenceDetectionTypeVendorDefinedBiometric" }, { 0x20, 0x09c2, "HumanPresenceDetectionTypeFacialBiometric" }, { 0x20, 0x09c3, "HumanPresenceDetectionTypeAudioBiometric" }, { 0x20, 0x1000, "ModifierChangeSensitivityAbsolute" }, { 0x20, 0x2000, "ModifierMaximum" }, { 0x20, 0x3000, "ModifierMinimum" }, { 0x20, 0x4000, "ModifierAccuracy" }, { 0x20, 0x5000, "ModifierResolution" }, { 0x20, 0x6000, "ModifierThresholdHigh" }, { 0x20, 0x7000, "ModifierThresholdLow" }, { 0x20, 0x8000, "ModifierCalibrationOffset" }, { 0x20, 0x9000, "ModifierCalibrationMultiplier" }, { 0x20, 0xa000, "ModifierReportInterval" }, { 0x20, 0xb000, "ModifierFrequencyMax" }, { 0x20, 0xc000, "ModifierPeriodMax" }, { 0x20, 0xd000, "ModifierChangeSensitivityPercentofRange" }, { 0x20, 0xe000, "ModifierChangeSensitivityPercentRelative" }, { 0x20, 0xf000, "ModifierVendorReserved" }, { 0x40, 0, "MedicalInstrument" }, { 0x40, 0x0001, "MedicalUltrasound" }, { 0x40, 0x0020, "VCRAcquisition" }, { 0x40, 0x0021, "FreezeThaw" }, { 0x40, 0x0022, "ClipStore" }, { 0x40, 0x0023, "Update" }, { 0x40, 0x0024, "Next" }, { 0x40, 0x0025, "Save" }, { 0x40, 0x0026, "Print" }, { 0x40, 0x0027, "MicrophoneEnable" }, { 0x40, 0x0040, "Cine" }, { 0x40, 0x0041, "TransmitPower" }, { 0x40, 0x0042, "Volume" }, { 0x40, 0x0043, "Focus" }, { 0x40, 0x0044, "Depth" }, { 0x40, 0x0060, "SoftStepPrimary" }, { 0x40, 0x0061, "SoftStepSecondary" }, { 0x40, 0x0070, "DepthGainCompensation" }, { 0x40, 0x0080, "ZoomSelect" }, { 0x40, 0x0081, "ZoomAdjust" }, { 0x40, 0x0082, "SpectralDopplerModeSelect" }, { 0x40, 0x0083, "SpectralDopplerAdjust" }, { 0x40, 0x0084, "ColorDopplerModeSelect" }, { 0x40, 0x0085, "ColorDopplerAdjust" }, { 0x40, 0x0086, "MotionModeSelect" }, { 0x40, 0x0087, "MotionModeAdjust" }, { 0x40, 0x0088, "2DModeSelect" }, { 0x40, 0x0089, "2DModeAdjust" }, { 0x40, 0x00a0, "SoftControlSelect" }, { 0x40, 0x00a1, "SoftControlAdjust" }, { 0x41, 0, "BrailleDisplay" }, { 0x41, 0x0001, 
"BrailleDisplay" }, { 0x41, 0x0002, "BrailleRow" }, { 0x41, 0x0003, "8DotBrailleCell" }, { 0x41, 0x0004, "6DotBrailleCell" }, { 0x41, 0x0005, "NumberofBrailleCells" }, { 0x41, 0x0006, "ScreenReaderControl" }, { 0x41, 0x0007, "ScreenReaderIdentifier" }, { 0x41, 0x00fa, "RouterSet1" }, { 0x41, 0x00fb, "RouterSet2" }, { 0x41, 0x00fc, "RouterSet3" }, { 0x41, 0x0100, "RouterKey" }, { 0x41, 0x0101, "RowRouterKey" }, { 0x41, 0x0200, "BrailleButtons" }, { 0x41, 0x0201, "BrailleKeyboardDot1" }, { 0x41, 0x0202, "BrailleKeyboardDot2" }, { 0x41, 0x0203, "BrailleKeyboardDot3" }, { 0x41, 0x0204, "BrailleKeyboardDot4" }, { 0x41, 0x0205, "BrailleKeyboardDot5" }, { 0x41, 0x0206, "BrailleKeyboardDot6" }, { 0x41, 0x0207, "BrailleKeyboardDot7" }, { 0x41, 0x0208, "BrailleKeyboardDot8" }, { 0x41, 0x0209, "BrailleKeyboardSpace" }, { 0x41, 0x020a, "BrailleKeyboardLeftSpace" }, { 0x41, 0x020b, "BrailleKeyboardRightSpace" }, { 0x41, 0x020c, "BrailleFaceControls" }, { 0x41, 0x020d, "BrailleLeftControls" }, { 0x41, 0x020e, "BrailleRightControls" }, { 0x41, 0x020f, "BrailleTopControls" }, { 0x41, 0x0210, "BrailleJoystickCenter" }, { 0x41, 0x0211, "BrailleJoystickUp" }, { 0x41, 0x0212, "BrailleJoystickDown" }, { 0x41, 0x0213, "BrailleJoystickLeft" }, { 0x41, 0x0214, "BrailleJoystickRight" }, { 0x41, 0x0215, "BrailleDPadCenter" }, { 0x41, 0x0216, "BrailleDPadUp" }, { 0x41, 0x0217, "BrailleDPadDown" }, { 0x41, 0x0218, "BrailleDPadLeft" }, { 0x41, 0x0219, "BrailleDPadRight" }, { 0x41, 0x021a, "BraillePanLeft" }, { 0x41, 0x021b, "BraillePanRight" }, { 0x41, 0x021c, "BrailleRockerUp" }, { 0x41, 0x021d, "BrailleRockerDown" }, { 0x41, 0x021e, "BrailleRockerPress" }, { 0x59, 0, "LightingAndIllumination" }, { 0x59, 0x0001, "LampArray" }, { 0x59, 0x0002, "LampArrayAttributesReport" }, { 0x59, 0x0003, "LampCount" }, { 0x59, 0x0004, "BoundingBoxWidthInMicrometers" }, { 0x59, 0x0005, "BoundingBoxHeightInMicrometers" }, { 0x59, 0x0006, "BoundingBoxDepthInMicrometers" }, { 0x59, 0x0007, "LampArrayKind" }, { 0x59, 0x0008, "MinUpdateIntervalInMicroseconds" }, { 0x59, 0x0020, "LampAttributesRequestReport" }, { 0x59, 0x0021, "LampId" }, { 0x59, 0x0022, "LampAttributesResponseReport" }, { 0x59, 0x0023, "PositionXInMicrometers" }, { 0x59, 0x0024, "PositionYInMicrometers" }, { 0x59, 0x0025, "PositionZInMicrometers" }, { 0x59, 0x0026, "LampPurposes" }, { 0x59, 0x0027, "UpdateLatencyInMicroseconds" }, { 0x59, 0x0028, "RedLevelCount" }, { 0x59, 0x0029, "GreenLevelCount" }, { 0x59, 0x002a, "BlueLevelCount" }, { 0x59, 0x002b, "IntensityLevelCount" }, { 0x59, 0x002c, "IsProgrammable" }, { 0x59, 0x002d, "InputBinding" }, { 0x59, 0x0050, "LampMultiUpdateReport" }, { 0x59, 0x0051, "RedUpdateChannel" }, { 0x59, 0x0052, "GreenUpdateChannel" }, { 0x59, 0x0053, "BlueUpdateChannel" }, { 0x59, 0x0054, "IntensityUpdateChannel" }, { 0x59, 0x0055, "LampUpdateFlags" }, { 0x59, 0x0060, "LampRangeUpdateReport" }, { 0x59, 0x0061, "LampIdStart" }, { 0x59, 0x0062, "LampIdEnd" }, { 0x59, 0x0070, "LampArrayControlReport" }, { 0x59, 0x0071, "AutonomousMode" }, { 0x80, 0, "Monitor" }, { 0x80, 0x0001, "MonitorControl" }, { 0x80, 0x0002, "EDIDInformation" }, { 0x80, 0x0003, "VDIFInformation" }, { 0x80, 0x0004, "VESAVersion" }, { 0x81, 0, "MonitorEnumerated" }, { 0x82, 0, "VESAVirtualControls" }, { 0x82, 0x0001, "Degauss" }, { 0x82, 0x0010, "Brightness" }, { 0x82, 0x0012, "Contrast" }, { 0x82, 0x0016, "RedVideoGain" }, { 0x82, 0x0018, "GreenVideoGain" }, { 0x82, 0x001a, "BlueVideoGain" }, { 0x82, 0x001c, "Focus" }, { 0x82, 0x0020, "HorizontalPosition" }, { 0x82, 0x0022, 
"HorizontalSize" }, { 0x82, 0x0024, "HorizontalPincushion" }, { 0x82, 0x0026, "HorizontalPincushionBalance" }, { 0x82, 0x0028, "HorizontalMisconvergence" }, { 0x82, 0x002a, "HorizontalLinearity" }, { 0x82, 0x002c, "HorizontalLinearityBalance" }, { 0x82, 0x0030, "VerticalPosition" }, { 0x82, 0x0032, "VerticalSize" }, { 0x82, 0x0034, "VerticalPincushion" }, { 0x82, 0x0036, "VerticalPincushionBalance" }, { 0x82, 0x0038, "VerticalMisconvergence" }, { 0x82, 0x003a, "VerticalLinearity" }, { 0x82, 0x003c, "VerticalLinearityBalance" }, { 0x82, 0x0040, "ParallelogramDistortionKeyBalance" }, { 0x82, 0x0042, "TrapezoidalDistortionKey" }, { 0x82, 0x0044, "TiltRotation" }, { 0x82, 0x0046, "TopCornerDistortionControl" }, { 0x82, 0x0048, "TopCornerDistortionBalance" }, { 0x82, 0x004a, "BottomCornerDistortionControl" }, { 0x82, 0x004c, "BottomCornerDistortionBalance" }, { 0x82, 0x0056, "HorizontalMoire" }, { 0x82, 0x0058, "VerticalMoire" }, { 0x82, 0x005e, "InputLevelSelect" }, { 0x82, 0x0060, "InputSourceSelect" }, { 0x82, 0x006c, "RedVideoBlackLevel" }, { 0x82, 0x006e, "GreenVideoBlackLevel" }, { 0x82, 0x0070, "BlueVideoBlackLevel" }, { 0x82, 0x00a2, "AutoSizeCenter" }, { 0x82, 0x00a4, "PolarityHorizontalSynchronization" }, { 0x82, 0x00a6, "PolarityVerticalSynchronization" }, { 0x82, 0x00a8, "SynchronizationType" }, { 0x82, 0x00aa, "ScreenOrientation" }, { 0x82, 0x00ac, "HorizontalFrequency" }, { 0x82, 0x00ae, "VerticalFrequency" }, { 0x82, 0x00b0, "Settings" }, { 0x82, 0x00ca, "OnScreenDisplay" }, { 0x82, 0x00d4, "StereoMode" }, { 0x84, 0, "Power" }, { 0x84, 0x0001, "iName" }, { 0x84, 0x0002, "PresentStatus" }, { 0x84, 0x0003, "ChangedStatus" }, { 0x84, 0x0004, "UPS" }, { 0x84, 0x0005, "PowerSupply" }, { 0x84, 0x0010, "BatterySystem" }, { 0x84, 0x0011, "BatterySystemId" }, { 0x84, 0x0012, "Battery" }, { 0x84, 0x0013, "BatteryId" }, { 0x84, 0x0014, "Charger" }, { 0x84, 0x0015, "ChargerId" }, { 0x84, 0x0016, "PowerConverter" }, { 0x84, 0x0017, "PowerConverterId" }, { 0x84, 0x0018, "OutletSystem" }, { 0x84, 0x0019, "OutletSystemId" }, { 0x84, 0x001a, "Input" }, { 0x84, 0x001b, "InputId" }, { 0x84, 0x001c, "Output" }, { 0x84, 0x001d, "OutputId" }, { 0x84, 0x001e, "Flow" }, { 0x84, 0x001f, "FlowId" }, { 0x84, 0x0020, "Outlet" }, { 0x84, 0x0021, "OutletId" }, { 0x84, 0x0022, "Gang" }, { 0x84, 0x0023, "GangId" }, { 0x84, 0x0024, "PowerSummary" }, { 0x84, 0x0025, "PowerSummaryId" }, { 0x84, 0x0030, "Voltage" }, { 0x84, 0x0031, "Current" }, { 0x84, 0x0032, "Frequency" }, { 0x84, 0x0033, "ApparentPower" }, { 0x84, 0x0034, "ActivePower" }, { 0x84, 0x0035, "PercentLoad" }, { 0x84, 0x0036, "Temperature" }, { 0x84, 0x0037, "Humidity" }, { 0x84, 0x0038, "BadCount" }, { 0x84, 0x0040, "ConfigVoltage" }, { 0x84, 0x0041, "ConfigCurrent" }, { 0x84, 0x0042, "ConfigFrequency" }, { 0x84, 0x0043, "ConfigApparentPower" }, { 0x84, 0x0044, "ConfigActivePower" }, { 0x84, 0x0045, "ConfigPercentLoad" }, { 0x84, 0x0046, "ConfigTemperature" }, { 0x84, 0x0047, "ConfigHumidity" }, { 0x84, 0x0050, "SwitchOnControl" }, { 0x84, 0x0051, "SwitchOffControl" }, { 0x84, 0x0052, "ToggleControl" }, { 0x84, 0x0053, "LowVoltageTransfer" }, { 0x84, 0x0054, "HighVoltageTransfer" }, { 0x84, 0x0055, "DelayBeforeReboot" }, { 0x84, 0x0056, "DelayBeforeStartup" }, { 0x84, 0x0057, "DelayBeforeShutdown" }, { 0x84, 0x0058, "Test" }, { 0x84, 0x0059, "ModuleReset" }, { 0x84, 0x005a, "AudibleAlarmControl" }, { 0x84, 0x0060, "Present" }, { 0x84, 0x0061, "Good" }, { 0x84, 0x0062, "InternalFailure" }, { 0x84, 0x0063, "VoltagOutOfRange" }, { 0x84, 0x0064, 
"FrequencyOutOfRange" }, { 0x84, 0x0065, "Overload" }, { 0x84, 0x0066, "OverCharged" }, { 0x84, 0x0067, "OverTemperature" }, { 0x84, 0x0068, "ShutdownRequested" }, { 0x84, 0x0069, "ShutdownImminent" }, { 0x84, 0x006b, "SwitchOnOff" }, { 0x84, 0x006c, "Switchable" }, { 0x84, 0x006d, "Used" }, { 0x84, 0x006e, "Boost" }, { 0x84, 0x006f, "Buck" }, { 0x84, 0x0070, "Initialized" }, { 0x84, 0x0071, "Tested" }, { 0x84, 0x0072, "AwaitingPower" }, { 0x84, 0x0073, "CommunicationLost" }, { 0x84, 0x00fd, "iManufacturer" }, { 0x84, 0x00fe, "iProduct" }, { 0x84, 0x00ff, "iSerialNumber" }, { 0x85, 0, "BatterySystem" }, { 0x85, 0x0001, "SmartBatteryBatteryMode" }, { 0x85, 0x0002, "SmartBatteryBatteryStatus" }, { 0x85, 0x0003, "SmartBatteryAlarmWarning" }, { 0x85, 0x0004, "SmartBatteryChargerMode" }, { 0x85, 0x0005, "SmartBatteryChargerStatus" }, { 0x85, 0x0006, "SmartBatteryChargerSpecInfo" }, { 0x85, 0x0007, "SmartBatterySelectorState" }, { 0x85, 0x0008, "SmartBatterySelectorPresets" }, { 0x85, 0x0009, "SmartBatterySelectorInfo" }, { 0x85, 0x0010, "OptionalMfgFunction1" }, { 0x85, 0x0011, "OptionalMfgFunction2" }, { 0x85, 0x0012, "OptionalMfgFunction3" }, { 0x85, 0x0013, "OptionalMfgFunction4" }, { 0x85, 0x0014, "OptionalMfgFunction5" }, { 0x85, 0x0015, "ConnectionToSMBus" }, { 0x85, 0x0016, "OutputConnection" }, { 0x85, 0x0017, "ChargerConnection" }, { 0x85, 0x0018, "BatteryInsertion" }, { 0x85, 0x0019, "UseNext" }, { 0x85, 0x001a, "OKToUse" }, { 0x85, 0x001b, "BatterySupported" }, { 0x85, 0x001c, "SelectorRevision" }, { 0x85, 0x001d, "ChargingIndicator" }, { 0x85, 0x0028, "ManufacturerAccess" }, { 0x85, 0x0029, "RemainingCapacityLimit" }, { 0x85, 0x002a, "RemainingTimeLimit" }, { 0x85, 0x002b, "AtRate" }, { 0x85, 0x002c, "CapacityMode" }, { 0x85, 0x002d, "BroadcastToCharger" }, { 0x85, 0x002e, "PrimaryBattery" }, { 0x85, 0x002f, "ChargeController" }, { 0x85, 0x0040, "TerminateCharge" }, { 0x85, 0x0041, "TerminateDischarge" }, { 0x85, 0x0042, "BelowRemainingCapacityLimit" }, { 0x85, 0x0043, "RemainingTimeLimitExpired" }, { 0x85, 0x0044, "Charging" }, { 0x85, 0x0045, "Discharging" }, { 0x85, 0x0046, "FullyCharged" }, { 0x85, 0x0047, "FullyDischarged" }, { 0x85, 0x0048, "ConditioningFlag" }, { 0x85, 0x0049, "AtRateOK" }, { 0x85, 0x004a, "SmartBatteryErrorCode" }, { 0x85, 0x004b, "NeedReplacement" }, { 0x85, 0x0060, "AtRateTimeToFull" }, { 0x85, 0x0061, "AtRateTimeToEmpty" }, { 0x85, 0x0062, "AverageCurrent" }, { 0x85, 0x0063, "MaxError" }, { 0x85, 0x0064, "RelativeStateOfCharge" }, { 0x85, 0x0065, "AbsoluteStateOfCharge" }, { 0x85, 0x0066, "RemainingCapacity" }, { 0x85, 0x0067, "FullChargeCapacity" }, { 0x85, 0x0068, "RunTimeToEmpty" }, { 0x85, 0x0069, "AverageTimeToEmpty" }, { 0x85, 0x006a, "AverageTimeToFull" }, { 0x85, 0x006b, "CycleCount" }, { 0x85, 0x0080, "BatteryPackModelLevel" }, { 0x85, 0x0081, "InternalChargeController" }, { 0x85, 0x0082, "PrimaryBatterySupport" }, { 0x85, 0x0083, "DesignCapacity" }, { 0x85, 0x0084, "SpecificationInfo" }, { 0x85, 0x0085, "ManufactureDate" }, { 0x85, 0x0086, "SerialNumber" }, { 0x85, 0x0087, "iManufacturerName" }, { 0x85, 0x0088, "iDeviceName" }, { 0x85, 0x0089, "iDeviceChemistry" }, { 0x85, 0x008a, "ManufacturerData" }, { 0x85, 0x008b, "Rechargable" }, { 0x85, 0x008c, "WarningCapacityLimit" }, { 0x85, 0x008d, "CapacityGranularity1" }, { 0x85, 0x008e, "CapacityGranularity2" }, { 0x85, 0x008f, "iOEMInformation" }, { 0x85, 0x00c0, "InhibitCharge" }, { 0x85, 0x00c1, "EnablePolling" }, { 0x85, 0x00c2, "ResetToZero" }, { 0x85, 0x00d0, "ACPresent" }, { 0x85, 0x00d1, 
"BatteryPresent" }, { 0x85, 0x00d2, "PowerFail" }, { 0x85, 0x00d3, "AlarmInhibited" }, { 0x85, 0x00d4, "ThermistorUnderRange" }, { 0x85, 0x00d5, "ThermistorHot" }, { 0x85, 0x00d6, "ThermistorCold" }, { 0x85, 0x00d7, "ThermistorOverRange" }, { 0x85, 0x00d8, "VoltageOutOfRange" }, { 0x85, 0x00d9, "CurrentOutOfRange" }, { 0x85, 0x00da, "CurrentNotRegulated" }, { 0x85, 0x00db, "VoltageNotRegulated" }, { 0x85, 0x00dc, "MasterMode" }, { 0x85, 0x00f0, "ChargerSelectorSupport" }, { 0x85, 0x00f1, "ChargerSpec" }, { 0x85, 0x00f2, "Level2" }, { 0x85, 0x00f3, "Level3" }, { 0x8c, 0, "BarcodeScanner" }, { 0x8c, 0x0001, "BarcodeBadgeReader" }, { 0x8c, 0x0002, "BarcodeScanner" }, { 0x8c, 0x0003, "DumbBarCodeScanner" }, { 0x8c, 0x0004, "CordlessScannerBase" }, { 0x8c, 0x0005, "BarCodeScannerCradle" }, { 0x8c, 0x0010, "AttributeReport" }, { 0x8c, 0x0011, "SettingsReport" }, { 0x8c, 0x0012, "ScannedDataReport" }, { 0x8c, 0x0013, "RawScannedDataReport" }, { 0x8c, 0x0014, "TriggerReport" }, { 0x8c, 0x0015, "StatusReport" }, { 0x8c, 0x0016, "UPCEANControlReport" }, { 0x8c, 0x0017, "EAN23LabelControlReport" }, { 0x8c, 0x0018, "Code39ControlReport" }, { 0x8c, 0x0019, "Interleaved2of5ControlReport" }, { 0x8c, 0x001a, "Standard2of5ControlReport" }, { 0x8c, 0x001b, "MSIPlesseyControlReport" }, { 0x8c, 0x001c, "CodabarControlReport" }, { 0x8c, 0x001d, "Code128ControlReport" }, { 0x8c, 0x001e, "Misc1DControlReport" }, { 0x8c, 0x001f, "2DControlReport" }, { 0x8c, 0x0030, "AimingPointerMode" }, { 0x8c, 0x0031, "BarCodePresentSensor" }, { 0x8c, 0x0032, "Class1ALaser" }, { 0x8c, 0x0033, "Class2Laser" }, { 0x8c, 0x0034, "HeaterPresent" }, { 0x8c, 0x0035, "ContactScanner" }, { 0x8c, 0x0036, "ElectronicArticleSurveillanceNotification" }, { 0x8c, 0x0037, "ConstantElectronicArticleSurveillance" }, { 0x8c, 0x0038, "ErrorIndication" }, { 0x8c, 0x0039, "FixedBeeper" }, { 0x8c, 0x003a, "GoodDecodeIndication" }, { 0x8c, 0x003b, "HandsFreeScanning" }, { 0x8c, 0x003c, "IntrinsicallySafe" }, { 0x8c, 0x003d, "KlasseEinsLaser" }, { 0x8c, 0x003e, "LongRangeScanner" }, { 0x8c, 0x003f, "MirrorSpeedControl" }, { 0x8c, 0x0040, "NotOnFileIndication" }, { 0x8c, 0x0041, "ProgrammableBeeper" }, { 0x8c, 0x0042, "Triggerless" }, { 0x8c, 0x0043, "Wand" }, { 0x8c, 0x0044, "WaterResistant" }, { 0x8c, 0x0045, "MultiRangeScanner" }, { 0x8c, 0x0046, "ProximitySensor" }, { 0x8c, 0x004d, "FragmentDecoding" }, { 0x8c, 0x004e, "ScannerReadConfidence" }, { 0x8c, 0x004f, "DataPrefix" }, { 0x8c, 0x0050, "PrefixAIMI" }, { 0x8c, 0x0051, "PrefixNone" }, { 0x8c, 0x0052, "PrefixProprietary" }, { 0x8c, 0x0055, "ActiveTime" }, { 0x8c, 0x0056, "AimingLaserPattern" }, { 0x8c, 0x0057, "BarCodePresent" }, { 0x8c, 0x0058, "BeeperState" }, { 0x8c, 0x0059, "LaserOnTime" }, { 0x8c, 0x005a, "LaserState" }, { 0x8c, 0x005b, "LockoutTime" }, { 0x8c, 0x005c, "MotorState" }, { 0x8c, 0x005d, "MotorTimeout" }, { 0x8c, 0x005e, "PowerOnResetScanner" }, { 0x8c, 0x005f, "PreventReadofBarcodes" }, { 0x8c, 0x0060, "InitiateBarcodeRead" }, { 0x8c, 0x0061, "TriggerState" }, { 0x8c, 0x0062, "TriggerMode" }, { 0x8c, 0x0063, "TriggerModeBlinkingLaserOn" }, { 0x8c, 0x0064, "TriggerModeContinuousLaserOn" }, { 0x8c, 0x0065, "TriggerModeLaseronwhilePulled" }, { 0x8c, 0x0066, "TriggerModeLaserstaysonafterrelease" }, { 0x8c, 0x006d, "CommitParameterstoNVM" }, { 0x8c, 0x006e, "ParameterScanning" }, { 0x8c, 0x006f, "ParametersChanged" }, { 0x8c, 0x0070, "Setparameterdefaultvalues" }, { 0x8c, 0x0075, "ScannerInCradle" }, { 0x8c, 0x0076, "ScannerInRange" }, { 0x8c, 0x007a, "AimDuration" }, { 0x8c, 
0x007b, "GoodReadLampDuration" }, { 0x8c, 0x007c, "GoodReadLampIntensity" }, { 0x8c, 0x007d, "GoodReadLED" }, { 0x8c, 0x007e, "GoodReadToneFrequency" }, { 0x8c, 0x007f, "GoodReadToneLength" }, { 0x8c, 0x0080, "GoodReadToneVolume" }, { 0x8c, 0x0082, "NoReadMessage" }, { 0x8c, 0x0083, "NotonFileVolume" }, { 0x8c, 0x0084, "PowerupBeep" }, { 0x8c, 0x0085, "SoundErrorBeep" }, { 0x8c, 0x0086, "SoundGoodReadBeep" }, { 0x8c, 0x0087, "SoundNotOnFileBeep" }, { 0x8c, 0x0088, "GoodReadWhentoWrite" }, { 0x8c, 0x0089, "GRWTIAfterDecode" }, { 0x8c, 0x008a, "GRWTIBeepLampaftertransmit" }, { 0x8c, 0x008b, "GRWTINoBeepLampuseatall" }, { 0x8c, 0x0091, "BooklandEAN" }, { 0x8c, 0x0092, "ConvertEAN8to13Type" }, { 0x8c, 0x0093, "ConvertUPCAtoEAN13" }, { 0x8c, 0x0094, "ConvertUPCEtoA" }, { 0x8c, 0x0095, "EAN13" }, { 0x8c, 0x0096, "EAN8" }, { 0x8c, 0x0097, "EAN99128Mandatory" }, { 0x8c, 0x0098, "EAN99P5128Optional" }, { 0x8c, 0x0099, "EnableEANTwoLabel" }, { 0x8c, 0x009a, "UPCEAN" }, { 0x8c, 0x009b, "UPCEANCouponCode" }, { 0x8c, 0x009c, "UPCEANPeriodicals" }, { 0x8c, 0x009d, "UPCA" }, { 0x8c, 0x009e, "UPCAwith128Mandatory" }, { 0x8c, 0x009f, "UPCAwith128Optional" }, { 0x8c, 0x00a0, "UPCAwithP5Optional" }, { 0x8c, 0x00a1, "UPCE" }, { 0x8c, 0x00a2, "UPCE1" }, { 0x8c, 0x00a9, "Periodical" }, { 0x8c, 0x00aa, "PeriodicalAutoDiscriminate2" }, { 0x8c, 0x00ab, "PeriodicalOnlyDecodewith2" }, { 0x8c, 0x00ac, "PeriodicalIgnore2" }, { 0x8c, 0x00ad, "PeriodicalAutoDiscriminate5" }, { 0x8c, 0x00ae, "PeriodicalOnlyDecodewith5" }, { 0x8c, 0x00af, "PeriodicalIgnore5" }, { 0x8c, 0x00b0, "Check" }, { 0x8c, 0x00b1, "CheckDisablePrice" }, { 0x8c, 0x00b2, "CheckEnable4digitPrice" }, { 0x8c, 0x00b3, "CheckEnable5digitPrice" }, { 0x8c, 0x00b4, "CheckEnableEuropean4digitPrice" }, { 0x8c, 0x00b5, "CheckEnableEuropean5digitPrice" }, { 0x8c, 0x00b7, "EANTwoLabel" }, { 0x8c, 0x00b8, "EANThreeLabel" }, { 0x8c, 0x00b9, "EAN8FlagDigit1" }, { 0x8c, 0x00ba, "EAN8FlagDigit2" }, { 0x8c, 0x00bb, "EAN8FlagDigit3" }, { 0x8c, 0x00bc, "EAN13FlagDigit1" }, { 0x8c, 0x00bd, "EAN13FlagDigit2" }, { 0x8c, 0x00be, "EAN13FlagDigit3" }, { 0x8c, 0x00bf, "AddEAN23LabelDefinition" }, { 0x8c, 0x00c0, "ClearallEAN23LabelDefinitions" }, { 0x8c, 0x00c3, "Codabar" }, { 0x8c, 0x00c4, "Code128" }, { 0x8c, 0x00c7, "Code39" }, { 0x8c, 0x00c8, "Code93" }, { 0x8c, 0x00c9, "FullASCIIConversion" }, { 0x8c, 0x00ca, "Interleaved2of5" }, { 0x8c, 0x00cb, "ItalianPharmacyCode" }, { 0x8c, 0x00cc, "MSIPlessey" }, { 0x8c, 0x00cd, "Standard2of5IATA" }, { 0x8c, 0x00ce, "Standard2of5" }, { 0x8c, 0x00d3, "TransmitStartStop" }, { 0x8c, 0x00d4, "TriOptic" }, { 0x8c, 0x00d5, "UCCEAN128" }, { 0x8c, 0x00d6, "CheckDigit" }, { 0x8c, 0x00d7, "CheckDigitDisable" }, { 0x8c, 0x00d8, "CheckDigitEnableInterleaved2of5OPCC" }, { 0x8c, 0x00d9, "CheckDigitEnableInterleaved2of5USS" }, { 0x8c, 0x00da, "CheckDigitEnableStandard2of5OPCC" }, { 0x8c, 0x00db, "CheckDigitEnableStandard2of5USS" }, { 0x8c, 0x00dc, "CheckDigitEnableOneMSIPlessey" }, { 0x8c, 0x00dd, "CheckDigitEnableTwoMSIPlessey" }, { 0x8c, 0x00de, "CheckDigitCodabarEnable" }, { 0x8c, 0x00df, "CheckDigitCode39Enable" }, { 0x8c, 0x00f0, "TransmitCheckDigit" }, { 0x8c, 0x00f1, "DisableCheckDigitTransmit" }, { 0x8c, 0x00f2, "EnableCheckDigitTransmit" }, { 0x8c, 0x00fb, "SymbologyIdentifier1" }, { 0x8c, 0x00fc, "SymbologyIdentifier2" }, { 0x8c, 0x00fd, "SymbologyIdentifier3" }, { 0x8c, 0x00fe, "DecodedData" }, { 0x8c, 0x00ff, "DecodeDataContinued" }, { 0x8c, 0x0100, "BarSpaceData" }, { 0x8c, 0x0101, "ScannerDataAccuracy" }, { 0x8c, 0x0102, 
"RawDataPolarity" }, { 0x8c, 0x0103, "PolarityInvertedBarCode" }, { 0x8c, 0x0104, "PolarityNormalBarCode" }, { 0x8c, 0x0106, "MinimumLengthtoDecode" }, { 0x8c, 0x0107, "MaximumLengthtoDecode" }, { 0x8c, 0x0108, "DiscreteLengthtoDecode1" }, { 0x8c, 0x0109, "DiscreteLengthtoDecode2" }, { 0x8c, 0x010a, "DataLengthMethod" }, { 0x8c, 0x010b, "DLMethodReadany" }, { 0x8c, 0x010c, "DLMethodCheckinRange" }, { 0x8c, 0x010d, "DLMethodCheckforDiscrete" }, { 0x8c, 0x0110, "AztecCode" }, { 0x8c, 0x0111, "BC412" }, { 0x8c, 0x0112, "ChannelCode" }, { 0x8c, 0x0113, "Code16" }, { 0x8c, 0x0114, "Code32" }, { 0x8c, 0x0115, "Code49" }, { 0x8c, 0x0116, "CodeOne" }, { 0x8c, 0x0117, "Colorcode" }, { 0x8c, 0x0118, "DataMatrix" }, { 0x8c, 0x0119, "MaxiCode" }, { 0x8c, 0x011a, "MicroPDF" }, { 0x8c, 0x011b, "PDF417" }, { 0x8c, 0x011c, "PosiCode" }, { 0x8c, 0x011d, "QRCode" }, { 0x8c, 0x011e, "SuperCode" }, { 0x8c, 0x011f, "UltraCode" }, { 0x8c, 0x0120, "USD5SlugCode" }, { 0x8c, 0x0121, "VeriCode" }, { 0x8d, 0, "Scales" }, { 0x8d, 0x0001, "Scales" }, { 0x8d, 0x0020, "ScaleDevice" }, { 0x8d, 0x0021, "ScaleClass" }, { 0x8d, 0x0022, "ScaleClassIMetric" }, { 0x8d, 0x0023, "ScaleClassIIMetric" }, { 0x8d, 0x0024, "ScaleClassIIIMetric" }, { 0x8d, 0x0025, "ScaleClassIIILMetric" }, { 0x8d, 0x0026, "ScaleClassIVMetric" }, { 0x8d, 0x0027, "ScaleClassIIIEnglish" }, { 0x8d, 0x0028, "ScaleClassIIILEnglish" }, { 0x8d, 0x0029, "ScaleClassIVEnglish" }, { 0x8d, 0x002a, "ScaleClassGeneric" }, { 0x8d, 0x0030, "ScaleAttributeReport" }, { 0x8d, 0x0031, "ScaleControlReport" }, { 0x8d, 0x0032, "ScaleDataReport" }, { 0x8d, 0x0033, "ScaleStatusReport" }, { 0x8d, 0x0034, "ScaleWeightLimitReport" }, { 0x8d, 0x0035, "ScaleStatisticsReport" }, { 0x8d, 0x0040, "DataWeight" }, { 0x8d, 0x0041, "DataScaling" }, { 0x8d, 0x0050, "WeightUnit" }, { 0x8d, 0x0051, "WeightUnitMilligram" }, { 0x8d, 0x0052, "WeightUnitGram" }, { 0x8d, 0x0053, "WeightUnitKilogram" }, { 0x8d, 0x0054, "WeightUnitCarats" }, { 0x8d, 0x0055, "WeightUnitTaels" }, { 0x8d, 0x0056, "WeightUnitGrains" }, { 0x8d, 0x0057, "WeightUnitPennyweights" }, { 0x8d, 0x0058, "WeightUnitMetricTon" }, { 0x8d, 0x0059, "WeightUnitAvoirTon" }, { 0x8d, 0x005a, "WeightUnitTroyOunce" }, { 0x8d, 0x005b, "WeightUnitOunce" }, { 0x8d, 0x005c, "WeightUnitPound" }, { 0x8d, 0x0060, "CalibrationCount" }, { 0x8d, 0x0061, "ReZeroCount" }, { 0x8d, 0x0070, "ScaleStatus" }, { 0x8d, 0x0071, "ScaleStatusFault" }, { 0x8d, 0x0072, "ScaleStatusStableatCenterofZero" }, { 0x8d, 0x0073, "ScaleStatusInMotion" }, { 0x8d, 0x0074, "ScaleStatusWeightStable" }, { 0x8d, 0x0075, "ScaleStatusUnderZero" }, { 0x8d, 0x0076, "ScaleStatusOverWeightLimit" }, { 0x8d, 0x0077, "ScaleStatusRequiresCalibration" }, { 0x8d, 0x0078, "ScaleStatusRequiresRezeroing" }, { 0x8d, 0x0080, "ZeroScale" }, { 0x8d, 0x0081, "EnforcedZeroReturn" }, { 0x8e, 0, "MagneticStripeReader" }, { 0x8e, 0x0001, "MSRDeviceReadOnly" }, { 0x8e, 0x0011, "Track1Length" }, { 0x8e, 0x0012, "Track2Length" }, { 0x8e, 0x0013, "Track3Length" }, { 0x8e, 0x0014, "TrackJISLength" }, { 0x8e, 0x0020, "TrackData" }, { 0x8e, 0x0021, "Track1Data" }, { 0x8e, 0x0022, "Track2Data" }, { 0x8e, 0x0023, "Track3Data" }, { 0x8e, 0x0024, "TrackJISData" }, { 0x90, 0, "CameraControl" }, { 0x90, 0x0020, "CameraAutofocus" }, { 0x90, 0x0021, "CameraShutter" }, { 0x91, 0, "Arcade" }, { 0x91, 0x0001, "GeneralPurposeIOCard" }, { 0x91, 0x0002, "CoinDoor" }, { 0x91, 0x0003, "WatchdogTimer" }, { 0x91, 0x0030, "GeneralPurposeAnalogInputState" }, { 0x91, 0x0031, "GeneralPurposeDigitalInputState" }, { 0x91, 0x0032, 
"GeneralPurposeOpticalInputState" }, { 0x91, 0x0033, "GeneralPurposeDigitalOutputState" }, { 0x91, 0x0034, "NumberofCoinDoors" }, { 0x91, 0x0035, "CoinDrawerDropCount" }, { 0x91, 0x0036, "CoinDrawerStart" }, { 0x91, 0x0037, "CoinDrawerService" }, { 0x91, 0x0038, "CoinDrawerTilt" }, { 0x91, 0x0039, "CoinDoorTest" }, { 0x91, 0x0040, "CoinDoorLockout" }, { 0x91, 0x0041, "WatchdogTimeout" }, { 0x91, 0x0042, "WatchdogAction" }, { 0x91, 0x0043, "WatchdogReboot" }, { 0x91, 0x0044, "WatchdogRestart" }, { 0x91, 0x0045, "AlarmInput" }, { 0x91, 0x0046, "CoinDoorCounter" }, { 0x91, 0x0047, "IODirectionMapping" }, { 0x91, 0x0048, "SetIODirectionMapping" }, { 0x91, 0x0049, "ExtendedOpticalInputState" }, { 0x91, 0x004a, "PinPadInputState" }, { 0x91, 0x004b, "PinPadStatus" }, { 0x91, 0x004c, "PinPadOutput" }, { 0x91, 0x004d, "PinPadCommand" }, { 0xf1d0, 0, "FIDOAlliance" }, { 0xf1d0, 0x0001, "U2FAuthenticatorDevice" }, { 0xf1d0, 0x0020, "InputReportData" }, { 0xf1d0, 0x0021, "OutputReportData" }, /* pages 0xff00 to 0xffff are vendor-specific */ { 0xffff, 0, "Vendor-specific-FF" }, { 0, 0, NULL } }; /* Either output directly into simple seq_file, or (if f == NULL) * allocate a separate buffer that will then be passed to the 'events' * ringbuffer. * * This is because these functions can be called both for "one-shot" * "rdesc" while resolving, or for blocking "events". * * This holds both for resolv_usage_page() and hid_resolv_usage(). */ static char *resolv_usage_page(unsigned page, struct seq_file *f) { const struct hid_usage_entry *p; char *buf = NULL; if (!f) { buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC); if (!buf) return ERR_PTR(-ENOMEM); } for (p = hid_usage_table; p->description; p++) if (p->page == page) { if (!f) { snprintf(buf, HID_DEBUG_BUFSIZE, "%s", p->description); return buf; } else { seq_printf(f, "%s", p->description); return NULL; } } if (!f) snprintf(buf, HID_DEBUG_BUFSIZE, "%04x", page); else seq_printf(f, "%04x", page); return buf; } char *hid_resolv_usage(unsigned usage, struct seq_file *f) { const struct hid_usage_entry *p; const struct hid_usage_entry *m; char *buf = NULL; int len = 0; const char *modifier = NULL; unsigned int usage_modifier = usage & 0xF000; unsigned int usage_actual = usage & 0xFFFF; buf = resolv_usage_page(usage >> 16, f); if (IS_ERR(buf)) { pr_err("error allocating HID debug buffer\n"); return NULL; } if (!f) { len = strlen(buf); len += scnprintf(buf + len, HID_DEBUG_BUFSIZE - len, "."); } else { seq_printf(f, "."); } for (p = hid_usage_table; p->description; p++) if (p->page == (usage >> 16)) { if (p->page == 0x20 && usage_modifier) { for (m = p; m->description; m++) { if (p->page == m->page && m->usage == usage_modifier) { modifier = m->description; break; } } if (modifier) usage_actual = usage_actual & 0x0FFF; } if (!modifier) modifier = ""; for(++p; p->description && p->usage != 0; p++) if (p->usage == usage_actual) { if (!f) snprintf(buf + len, HID_DEBUG_BUFSIZE - len, "%s%s", p->description, modifier); else seq_printf(f, "%s%s", p->description, modifier); return buf; } break; } if (!f) snprintf(buf + len, HID_DEBUG_BUFSIZE - len, "%04x", usage & 0xffff); else seq_printf(f, "%04x", usage & 0xffff); return buf; } EXPORT_SYMBOL_GPL(hid_resolv_usage); static void tab(int n, struct seq_file *f) { seq_printf(f, "%*s", n, ""); } void hid_dump_field(struct hid_field *field, int n, struct seq_file *f) { int j; if (field->physical) { tab(n, f); seq_printf(f, "Physical("); hid_resolv_usage(field->physical, f); seq_printf(f, ")\n"); } if (field->logical) { tab(n, f); 
seq_printf(f, "Logical("); hid_resolv_usage(field->logical, f); seq_printf(f, ")\n"); } if (field->application) { tab(n, f); seq_printf(f, "Application("); hid_resolv_usage(field->application, f); seq_printf(f, ")\n"); } tab(n, f); seq_printf(f, "Usage(%d)\n", field->maxusage); for (j = 0; j < field->maxusage; j++) { tab(n+2, f); hid_resolv_usage(field->usage[j].hid, f); seq_printf(f, "\n"); } if (field->logical_minimum != field->logical_maximum) { tab(n, f); seq_printf(f, "Logical Minimum(%d)\n", field->logical_minimum); tab(n, f); seq_printf(f, "Logical Maximum(%d)\n", field->logical_maximum); } if (field->physical_minimum != field->physical_maximum) { tab(n, f); seq_printf(f, "Physical Minimum(%d)\n", field->physical_minimum); tab(n, f); seq_printf(f, "Physical Maximum(%d)\n", field->physical_maximum); } if (field->unit_exponent) { tab(n, f); seq_printf(f, "Unit Exponent(%d)\n", field->unit_exponent); } if (field->unit) { static const char *systems[5] = { "None", "SI Linear", "SI Rotation", "English Linear", "English Rotation" }; static const char *units[5][8] = { { "None", "None", "None", "None", "None", "None", "None", "None" }, { "None", "Centimeter", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" }, { "None", "Radians", "Gram", "Seconds", "Kelvin", "Ampere", "Candela", "None" }, { "None", "Inch", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" }, { "None", "Degrees", "Slug", "Seconds", "Fahrenheit", "Ampere", "Candela", "None" } }; int i; int sys; __u32 data = field->unit; /* First nibble tells us which system we're in. */ sys = data & 0xf; data >>= 4; if(sys > 4) { tab(n, f); seq_printf(f, "Unit(Invalid)\n"); } else { int earlier_unit = 0; tab(n, f); seq_printf(f, "Unit(%s : ", systems[sys]); for (i=1 ; i<sizeof(__u32)*2 ; i++) { char nibble = data & 0xf; data >>= 4; if (nibble != 0) { if(earlier_unit++ > 0) seq_printf(f, "*"); seq_printf(f, "%s", units[sys][i]); if(nibble != 1) { /* This is a _signed_ nibble(!) */ int val = nibble & 0x7; if(nibble & 0x08) val = -((0x7 & ~val) +1); seq_printf(f, "^%d", val); } } } seq_printf(f, ")\n"); } } tab(n, f); seq_printf(f, "Report Size(%u)\n", field->report_size); tab(n, f); seq_printf(f, "Report Count(%u)\n", field->report_count); tab(n, f); seq_printf(f, "Report Offset(%u)\n", field->report_offset); tab(n, f); seq_printf(f, "Flags( "); j = field->flags; seq_printf(f, "%s", HID_MAIN_ITEM_CONSTANT & j ? "Constant " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_VARIABLE & j ? "Variable " : "Array "); seq_printf(f, "%s", HID_MAIN_ITEM_RELATIVE & j ? "Relative " : "Absolute "); seq_printf(f, "%s", HID_MAIN_ITEM_WRAP & j ? "Wrap " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NONLINEAR & j ? "NonLinear " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NO_PREFERRED & j ? "NoPreferredState " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_NULL_STATE & j ? "NullState " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_VOLATILE & j ? "Volatile " : ""); seq_printf(f, "%s", HID_MAIN_ITEM_BUFFERED_BYTE & j ? 
"BufferedByte " : ""); seq_printf(f, ")\n"); } EXPORT_SYMBOL_GPL(hid_dump_field); void hid_dump_device(struct hid_device *device, struct seq_file *f) { struct hid_report_enum *report_enum; struct hid_report *report; struct list_head *list; unsigned i,k; static const char *table[] = {"INPUT", "OUTPUT", "FEATURE"}; for (i = 0; i < HID_REPORT_TYPES; i++) { report_enum = device->report_enum + i; list = report_enum->report_list.next; while (list != &report_enum->report_list) { report = (struct hid_report *) list; tab(2, f); seq_printf(f, "%s", table[i]); if (report->id) seq_printf(f, "(%d)", report->id); seq_printf(f, "[%s]", table[report->type]); seq_printf(f, "\n"); for (k = 0; k < report->maxfield; k++) { tab(4, f); seq_printf(f, "Field(%d)\n", k); hid_dump_field(report->field[k], 6, f); } list = list->next; } } } EXPORT_SYMBOL_GPL(hid_dump_device); /* enqueue string to 'events' ring buffer */ void hid_debug_event(struct hid_device *hdev, char *buf) { struct hid_debug_list *list; unsigned long flags; spin_lock_irqsave(&hdev->debug_list_lock, flags); list_for_each_entry(list, &hdev->debug_list, node) kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); spin_unlock_irqrestore(&hdev->debug_list_lock, flags); wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_debug_event); void hid_dump_report(struct hid_device *hid, int type, u8 *data, int size) { struct hid_report_enum *report_enum; char *buf; unsigned int i; buf = kmalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC); if (!buf) return; report_enum = hid->report_enum + type; /* dump the report */ snprintf(buf, HID_DEBUG_BUFSIZE - 1, "\nreport (size %u) (%snumbered) = ", size, report_enum->numbered ? "" : "un"); hid_debug_event(hid, buf); for (i = 0; i < size; i++) { snprintf(buf, HID_DEBUG_BUFSIZE - 1, " %02x", data[i]); hid_debug_event(hid, buf); } hid_debug_event(hid, "\n"); kfree(buf); } EXPORT_SYMBOL_GPL(hid_dump_report); void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 value) { char *buf; int len; buf = hid_resolv_usage(usage->hid, NULL); if (!buf) return; len = strlen(buf); snprintf(buf + len, HID_DEBUG_BUFSIZE - len - 1, " = %d\n", value); hid_debug_event(hdev, buf); kfree(buf); wake_up_interruptible(&hdev->debug_wait); } EXPORT_SYMBOL_GPL(hid_dump_input); static const char *events[EV_MAX + 1] = { [EV_SYN] = "Sync", [EV_KEY] = "Key", [EV_REL] = "Relative", [EV_ABS] = "Absolute", [EV_MSC] = "Misc", [EV_LED] = "LED", [EV_SND] = "Sound", [EV_REP] = "Repeat", [EV_FF] = "ForceFeedback", [EV_PWR] = "Power", [EV_FF_STATUS] = "ForceFeedbackStatus", [EV_SW] = "Software", }; static const char *syncs[SYN_CNT] = { [SYN_REPORT] = "Report", [SYN_CONFIG] = "Config", [SYN_MT_REPORT] = "MT Report", [SYN_DROPPED] = "Dropped", }; static const char *keys[KEY_MAX + 1] = { [KEY_RESERVED] = "Reserved", [KEY_ESC] = "Esc", [KEY_1] = "1", [KEY_2] = "2", [KEY_3] = "3", [KEY_4] = "4", [KEY_5] = "5", [KEY_6] = "6", [KEY_7] = "7", [KEY_8] = "8", [KEY_9] = "9", [KEY_0] = "0", [KEY_MINUS] = "Minus", [KEY_EQUAL] = "Equal", [KEY_BACKSPACE] = "Backspace", [KEY_TAB] = "Tab", [KEY_Q] = "Q", [KEY_W] = "W", [KEY_E] = "E", [KEY_R] = "R", [KEY_T] = "T", [KEY_Y] = "Y", [KEY_U] = "U", [KEY_I] = "I", [KEY_O] = "O", [KEY_P] = "P", [KEY_LEFTBRACE] = "LeftBrace", [KEY_RIGHTBRACE] = "RightBrace", [KEY_ENTER] = "Enter", [KEY_LEFTCTRL] = "LeftControl", [KEY_A] = "A", [KEY_S] = "S", [KEY_D] = "D", [KEY_F] = "F", [KEY_G] = "G", [KEY_H] = "H", [KEY_J] = "J", [KEY_K] = "K", [KEY_L] = "L", [KEY_SEMICOLON] = "Semicolon", [KEY_APOSTROPHE] = "Apostrophe", [KEY_GRAVE] = 
"Grave", [KEY_LEFTSHIFT] = "LeftShift", [KEY_BACKSLASH] = "BackSlash", [KEY_Z] = "Z", [KEY_X] = "X", [KEY_C] = "C", [KEY_V] = "V", [KEY_B] = "B", [KEY_N] = "N", [KEY_M] = "M", [KEY_COMMA] = "Comma", [KEY_DOT] = "Dot", [KEY_SLASH] = "Slash", [KEY_RIGHTSHIFT] = "RightShift", [KEY_KPASTERISK] = "KPAsterisk", [KEY_LEFTALT] = "LeftAlt", [KEY_SPACE] = "Space", [KEY_CAPSLOCK] = "CapsLock", [KEY_F1] = "F1", [KEY_F2] = "F2", [KEY_F3] = "F3", [KEY_F4] = "F4", [KEY_F5] = "F5", [KEY_F6] = "F6", [KEY_F7] = "F7", [KEY_F8] = "F8", [KEY_F9] = "F9", [KEY_F10] = "F10", [KEY_NUMLOCK] = "NumLock", [KEY_SCROLLLOCK] = "ScrollLock", [KEY_KP7] = "KP7", [KEY_KP8] = "KP8", [KEY_KP9] = "KP9", [KEY_KPMINUS] = "KPMinus", [KEY_KP4] = "KP4", [KEY_KP5] = "KP5", [KEY_KP6] = "KP6", [KEY_KPPLUS] = "KPPlus", [KEY_KP1] = "KP1", [KEY_KP2] = "KP2", [KEY_KP3] = "KP3", [KEY_KP0] = "KP0", [KEY_KPDOT] = "KPDot", [KEY_ZENKAKUHANKAKU] = "Zenkaku/Hankaku", [KEY_102ND] = "102nd", [KEY_F11] = "F11", [KEY_F12] = "F12", [KEY_RO] = "RO", [KEY_KATAKANA] = "Katakana", [KEY_HIRAGANA] = "HIRAGANA", [KEY_HENKAN] = "Henkan", [KEY_KATAKANAHIRAGANA] = "Katakana/Hiragana", [KEY_MUHENKAN] = "Muhenkan", [KEY_KPJPCOMMA] = "KPJpComma", [KEY_KPENTER] = "KPEnter", [KEY_RIGHTCTRL] = "RightCtrl", [KEY_KPSLASH] = "KPSlash", [KEY_SYSRQ] = "SysRq", [KEY_RIGHTALT] = "RightAlt", [KEY_LINEFEED] = "LineFeed", [KEY_HOME] = "Home", [KEY_UP] = "Up", [KEY_PAGEUP] = "PageUp", [KEY_LEFT] = "Left", [KEY_RIGHT] = "Right", [KEY_END] = "End", [KEY_DOWN] = "Down", [KEY_PAGEDOWN] = "PageDown", [KEY_INSERT] = "Insert", [KEY_DELETE] = "Delete", [KEY_MACRO] = "Macro", [KEY_MUTE] = "Mute", [KEY_VOLUMEDOWN] = "VolumeDown", [KEY_VOLUMEUP] = "VolumeUp", [KEY_POWER] = "Power", [KEY_KPEQUAL] = "KPEqual", [KEY_KPPLUSMINUS] = "KPPlusMinus", [KEY_PAUSE] = "Pause", [KEY_KPCOMMA] = "KPComma", [KEY_HANGUEL] = "Hangeul", [KEY_HANJA] = "Hanja", [KEY_YEN] = "Yen", [KEY_LEFTMETA] = "LeftMeta", [KEY_RIGHTMETA] = "RightMeta", [KEY_COMPOSE] = "Compose", [KEY_STOP] = "Stop", [KEY_AGAIN] = "Again", [KEY_PROPS] = "Props", [KEY_UNDO] = "Undo", [KEY_FRONT] = "Front", [KEY_COPY] = "Copy", [KEY_OPEN] = "Open", [KEY_PASTE] = "Paste", [KEY_FIND] = "Find", [KEY_CUT] = "Cut", [KEY_HELP] = "Help", [KEY_MENU] = "Menu", [KEY_CALC] = "Calc", [KEY_SETUP] = "Setup", [KEY_SLEEP] = "Sleep", [KEY_WAKEUP] = "WakeUp", [KEY_FILE] = "File", [KEY_SENDFILE] = "SendFile", [KEY_DELETEFILE] = "DeleteFile", [KEY_XFER] = "X-fer", [KEY_PROG1] = "Prog1", [KEY_PROG2] = "Prog2", [KEY_WWW] = "WWW", [KEY_MSDOS] = "MSDOS", [KEY_COFFEE] = "Coffee", [KEY_ROTATE_DISPLAY] = "RotateDisplay", [KEY_CYCLEWINDOWS] = "CycleWindows", [KEY_MAIL] = "Mail", [KEY_BOOKMARKS] = "Bookmarks", [KEY_COMPUTER] = "Computer", [KEY_BACK] = "Back", [KEY_FORWARD] = "Forward", [KEY_CLOSECD] = "CloseCD", [KEY_EJECTCD] = "EjectCD", [KEY_EJECTCLOSECD] = "EjectCloseCD", [KEY_NEXTSONG] = "NextSong", [KEY_PLAYPAUSE] = "PlayPause", [KEY_PREVIOUSSONG] = "PreviousSong", [KEY_STOPCD] = "StopCD", [KEY_RECORD] = "Record", [KEY_REWIND] = "Rewind", [KEY_PHONE] = "Phone", [KEY_ISO] = "ISOKey", [KEY_CONFIG] = "Config", [KEY_HOMEPAGE] = "HomePage", [KEY_REFRESH] = "Refresh", [KEY_EXIT] = "Exit", [KEY_MOVE] = "Move", [KEY_EDIT] = "Edit", [KEY_SCROLLUP] = "ScrollUp", [KEY_SCROLLDOWN] = "ScrollDown", [KEY_KPLEFTPAREN] = "KPLeftParenthesis", [KEY_KPRIGHTPAREN] = "KPRightParenthesis", [KEY_NEW] = "New", [KEY_REDO] = "Redo", [KEY_F13] = "F13", [KEY_F14] = "F14", [KEY_F15] = "F15", [KEY_F16] = "F16", [KEY_F17] = "F17", [KEY_F18] = "F18", [KEY_F19] = "F19", [KEY_F20] = "F20", [KEY_F21] 
= "F21", [KEY_F22] = "F22", [KEY_F23] = "F23", [KEY_F24] = "F24", [KEY_PLAYCD] = "PlayCD", [KEY_PAUSECD] = "PauseCD", [KEY_PROG3] = "Prog3", [KEY_PROG4] = "Prog4", [KEY_ALL_APPLICATIONS] = "AllApplications", [KEY_SUSPEND] = "Suspend", [KEY_CLOSE] = "Close", [KEY_PLAY] = "Play", [KEY_FASTFORWARD] = "FastForward", [KEY_BASSBOOST] = "BassBoost", [KEY_PRINT] = "Print", [KEY_HP] = "HP", [KEY_CAMERA] = "Camera", [KEY_SOUND] = "Sound", [KEY_QUESTION] = "Question", [KEY_EMAIL] = "Email", [KEY_CHAT] = "Chat", [KEY_SEARCH] = "Search", [KEY_CONNECT] = "Connect", [KEY_FINANCE] = "Finance", [KEY_SPORT] = "Sport", [KEY_SHOP] = "Shop", [KEY_ALTERASE] = "AlternateErase", [KEY_CANCEL] = "Cancel", [KEY_BRIGHTNESSDOWN] = "BrightnessDown", [KEY_BRIGHTNESSUP] = "BrightnessUp", [KEY_MEDIA] = "Media", [KEY_UNKNOWN] = "Unknown", [BTN_DPAD_UP] = "BtnDPadUp", [BTN_DPAD_DOWN] = "BtnDPadDown", [BTN_DPAD_LEFT] = "BtnDPadLeft", [BTN_DPAD_RIGHT] = "BtnDPadRight", [BTN_0] = "Btn0", [BTN_1] = "Btn1", [BTN_2] = "Btn2", [BTN_3] = "Btn3", [BTN_4] = "Btn4", [BTN_5] = "Btn5", [BTN_6] = "Btn6", [BTN_7] = "Btn7", [BTN_8] = "Btn8", [BTN_9] = "Btn9", [BTN_LEFT] = "LeftBtn", [BTN_RIGHT] = "RightBtn", [BTN_MIDDLE] = "MiddleBtn", [BTN_SIDE] = "SideBtn", [BTN_EXTRA] = "ExtraBtn", [BTN_FORWARD] = "ForwardBtn", [BTN_BACK] = "BackBtn", [BTN_TASK] = "TaskBtn", [BTN_TRIGGER] = "Trigger", [BTN_THUMB] = "ThumbBtn", [BTN_THUMB2] = "ThumbBtn2", [BTN_TOP] = "TopBtn", [BTN_TOP2] = "TopBtn2", [BTN_PINKIE] = "PinkieBtn", [BTN_BASE] = "BaseBtn", [BTN_BASE2] = "BaseBtn2", [BTN_BASE3] = "BaseBtn3", [BTN_BASE4] = "BaseBtn4", [BTN_BASE5] = "BaseBtn5", [BTN_BASE6] = "BaseBtn6", [BTN_DEAD] = "BtnDead", [BTN_A] = "BtnA", [BTN_B] = "BtnB", [BTN_C] = "BtnC", [BTN_X] = "BtnX", [BTN_Y] = "BtnY", [BTN_Z] = "BtnZ", [BTN_TL] = "BtnTL", [BTN_TR] = "BtnTR", [BTN_TL2] = "BtnTL2", [BTN_TR2] = "BtnTR2", [BTN_SELECT] = "BtnSelect", [BTN_START] = "BtnStart", [BTN_MODE] = "BtnMode", [BTN_THUMBL] = "BtnThumbL", [BTN_THUMBR] = "BtnThumbR", [BTN_GRIPL] = "BtnGripL", [BTN_GRIPR] = "BtnGripR", [BTN_GRIPL2] = "BtnGripL2", [BTN_GRIPR2] = "BtnGripR2", [BTN_TOOL_PEN] = "ToolPen", [BTN_TOOL_RUBBER] = "ToolRubber", [BTN_TOOL_BRUSH] = "ToolBrush", [BTN_TOOL_PENCIL] = "ToolPencil", [BTN_TOOL_AIRBRUSH] = "ToolAirbrush", [BTN_TOOL_FINGER] = "ToolFinger", [BTN_TOOL_MOUSE] = "ToolMouse", [BTN_TOOL_LENS] = "ToolLens", [BTN_TOUCH] = "Touch", [BTN_STYLUS] = "Stylus", [BTN_STYLUS2] = "Stylus2", [BTN_TOOL_DOUBLETAP] = "ToolDoubleTap", [BTN_TOOL_TRIPLETAP] = "ToolTripleTap", [BTN_TOOL_QUADTAP] = "ToolQuadrupleTap", [BTN_GEAR_DOWN] = "BtnGearDown", [BTN_GEAR_UP] = "BtnGearUp", [KEY_OK] = "Ok", [KEY_SELECT] = "Select", [KEY_GOTO] = "Goto", [KEY_CLEAR] = "Clear", [KEY_POWER2] = "Power2", [KEY_OPTION] = "Option", [KEY_INFO] = "Info", [KEY_TIME] = "Time", [KEY_VENDOR] = "Vendor", [KEY_ARCHIVE] = "Archive", [KEY_PROGRAM] = "Program", [KEY_CHANNEL] = "Channel", [KEY_FAVORITES] = "Favorites", [KEY_EPG] = "EPG", [KEY_PVR] = "PVR", [KEY_MHP] = "MHP", [KEY_LANGUAGE] = "Language", [KEY_TITLE] = "Title", [KEY_SUBTITLE] = "Subtitle", [KEY_ANGLE] = "Angle", [KEY_MODE] = "Mode", [KEY_KEYBOARD] = "Keyboard", [KEY_PC] = "PC", [KEY_TV] = "TV", [KEY_TV2] = "TV2", [KEY_VCR] = "VCR", [KEY_VCR2] = "VCR2", [KEY_SAT] = "Sat", [KEY_SAT2] = "Sat2", [KEY_CD] = "CD", [KEY_TAPE] = "Tape", [KEY_RADIO] = "Radio", [KEY_TUNER] = "Tuner", [KEY_PLAYER] = "Player", [KEY_TEXT] = "Text", [KEY_DVD] = "DVD", [KEY_AUX] = "Aux", [KEY_MP3] = "MP3", [KEY_AUDIO] = "Audio", [KEY_VIDEO] = "Video", [KEY_DIRECTORY] = "Directory", [KEY_LIST] 
= "List", [KEY_MEMO] = "Memo", [KEY_CALENDAR] = "Calendar", [KEY_RED] = "Red", [KEY_GREEN] = "Green", [KEY_YELLOW] = "Yellow", [KEY_BLUE] = "Blue", [KEY_CHANNELUP] = "ChannelUp", [KEY_CHANNELDOWN] = "ChannelDown", [KEY_FIRST] = "First", [KEY_LAST] = "Last", [KEY_AB] = "AB", [KEY_NEXT] = "Next", [KEY_RESTART] = "Restart", [KEY_SLOW] = "Slow", [KEY_SHUFFLE] = "Shuffle", [KEY_BREAK] = "Break", [KEY_PREVIOUS] = "Previous", [KEY_DIGITS] = "Digits", [KEY_TEEN] = "TEEN", [KEY_TWEN] = "TWEN", [KEY_DEL_EOL] = "DeleteEOL", [KEY_DEL_EOS] = "DeleteEOS", [KEY_INS_LINE] = "InsertLine", [KEY_DEL_LINE] = "DeleteLine", [KEY_SEND] = "Send", [KEY_REPLY] = "Reply", [KEY_FORWARDMAIL] = "ForwardMail", [KEY_SAVE] = "Save", [KEY_DOCUMENTS] = "Documents", [KEY_SPELLCHECK] = "SpellCheck", [KEY_LOGOFF] = "Logoff", [KEY_FN] = "Fn", [KEY_FN_ESC] = "Fn+ESC", [KEY_FN_1] = "Fn+1", [KEY_FN_2] = "Fn+2", [KEY_FN_B] = "Fn+B", [KEY_FN_D] = "Fn+D", [KEY_FN_E] = "Fn+E", [KEY_FN_F] = "Fn+F", [KEY_FN_S] = "Fn+S", [KEY_FN_F1] = "Fn+F1", [KEY_FN_F2] = "Fn+F2", [KEY_FN_F3] = "Fn+F3", [KEY_FN_F4] = "Fn+F4", [KEY_FN_F5] = "Fn+F5", [KEY_FN_F6] = "Fn+F6", [KEY_FN_F7] = "Fn+F7", [KEY_FN_F8] = "Fn+F8", [KEY_FN_F9] = "Fn+F9", [KEY_FN_F10] = "Fn+F10", [KEY_FN_F11] = "Fn+F11", [KEY_FN_F12] = "Fn+F12", [KEY_KBDILLUMTOGGLE] = "KbdIlluminationToggle", [KEY_KBDILLUMDOWN] = "KbdIlluminationDown", [KEY_KBDILLUMUP] = "KbdIlluminationUp", [KEY_SWITCHVIDEOMODE] = "SwitchVideoMode", [KEY_BUTTONCONFIG] = "ButtonConfig", [KEY_TASKMANAGER] = "TaskManager", [KEY_JOURNAL] = "Journal", [KEY_CONTROLPANEL] = "ControlPanel", [KEY_APPSELECT] = "AppSelect", [KEY_SCREENSAVER] = "ScreenSaver", [KEY_VOICECOMMAND] = "VoiceCommand", [KEY_ASSISTANT] = "Assistant", [KEY_KBD_LAYOUT_NEXT] = "KbdLayoutNext", [KEY_EMOJI_PICKER] = "EmojiPicker", [KEY_CAMERA_ACCESS_ENABLE] = "CameraAccessEnable", [KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable", [KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle", [KEY_ACCESSIBILITY] = "Accessibility", [KEY_DO_NOT_DISTURB] = "DoNotDisturb", [KEY_DICTATE] = "Dictate", [KEY_MICMUTE] = "MicrophoneMute", [KEY_BRIGHTNESS_MIN] = "BrightnessMin", [KEY_BRIGHTNESS_MAX] = "BrightnessMax", [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev", [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext", [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup", [KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup", [KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept", [KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel", [KEY_MACRO1] = "Macro1", [KEY_MACRO2] = "Macro2", [KEY_MACRO3] = "Macro3", [KEY_MACRO4] = "Macro4", [KEY_MACRO5] = "Macro5", [KEY_MACRO6] = "Macro6", [KEY_MACRO7] = "Macro7", [KEY_MACRO8] = "Macro8", [KEY_MACRO9] = "Macro9", [KEY_MACRO10] = "Macro10", [KEY_MACRO11] = "Macro11", [KEY_MACRO12] = "Macro12", [KEY_MACRO13] = "Macro13", [KEY_MACRO14] = "Macro14", [KEY_MACRO15] = "Macro15", [KEY_MACRO16] = "Macro16", [KEY_MACRO17] = "Macro17", [KEY_MACRO18] = "Macro18", [KEY_MACRO19] = "Macro19", [KEY_MACRO20] = "Macro20", [KEY_MACRO21] = "Macro21", [KEY_MACRO22] = "Macro22", [KEY_MACRO23] = "Macro23", [KEY_MACRO24] = "Macro24", [KEY_MACRO25] = "Macro25", [KEY_MACRO26] = "Macro26", [KEY_MACRO27] = "Macro27", [KEY_MACRO28] = "Macro28", [KEY_MACRO29] = "Macro29", [KEY_MACRO30] = "Macro30", [BTN_TRIGGER_HAPPY1] = "TriggerHappy1", [BTN_TRIGGER_HAPPY2] = "TriggerHappy2", [BTN_TRIGGER_HAPPY3] = "TriggerHappy3", [BTN_TRIGGER_HAPPY4] = "TriggerHappy4", [BTN_TRIGGER_HAPPY5] = "TriggerHappy5", [BTN_TRIGGER_HAPPY6] = 
"TriggerHappy6", [BTN_TRIGGER_HAPPY7] = "TriggerHappy7", [BTN_TRIGGER_HAPPY8] = "TriggerHappy8", [BTN_TRIGGER_HAPPY9] = "TriggerHappy9", [BTN_TRIGGER_HAPPY10] = "TriggerHappy10", [BTN_TRIGGER_HAPPY11] = "TriggerHappy11", [BTN_TRIGGER_HAPPY12] = "TriggerHappy12", [BTN_TRIGGER_HAPPY13] = "TriggerHappy13", [BTN_TRIGGER_HAPPY14] = "TriggerHappy14", [BTN_TRIGGER_HAPPY15] = "TriggerHappy15", [BTN_TRIGGER_HAPPY16] = "TriggerHappy16", [BTN_TRIGGER_HAPPY17] = "TriggerHappy17", [BTN_TRIGGER_HAPPY18] = "TriggerHappy18", [BTN_TRIGGER_HAPPY19] = "TriggerHappy19", [BTN_TRIGGER_HAPPY20] = "TriggerHappy20", [BTN_TRIGGER_HAPPY21] = "TriggerHappy21", [BTN_TRIGGER_HAPPY22] = "TriggerHappy22", [BTN_TRIGGER_HAPPY23] = "TriggerHappy23", [BTN_TRIGGER_HAPPY24] = "TriggerHappy24", [BTN_TRIGGER_HAPPY25] = "TriggerHappy25", [BTN_TRIGGER_HAPPY26] = "TriggerHappy26", [BTN_TRIGGER_HAPPY27] = "TriggerHappy27", [BTN_TRIGGER_HAPPY28] = "TriggerHappy28", [BTN_TRIGGER_HAPPY29] = "TriggerHappy29", [BTN_TRIGGER_HAPPY30] = "TriggerHappy30", [BTN_TRIGGER_HAPPY31] = "TriggerHappy31", [BTN_TRIGGER_HAPPY32] = "TriggerHappy32", [BTN_TRIGGER_HAPPY33] = "TriggerHappy33", [BTN_TRIGGER_HAPPY34] = "TriggerHappy34", [BTN_TRIGGER_HAPPY35] = "TriggerHappy35", [BTN_TRIGGER_HAPPY36] = "TriggerHappy36", [BTN_TRIGGER_HAPPY37] = "TriggerHappy37", [BTN_TRIGGER_HAPPY38] = "TriggerHappy38", [BTN_TRIGGER_HAPPY39] = "TriggerHappy39", [BTN_TRIGGER_HAPPY40] = "TriggerHappy40", [BTN_STYLUS3] = "Stylus3", [BTN_TOOL_QUINTTAP] = "ToolQuintTap", [KEY_10CHANNELSDOWN] = "10ChannelsDown", [KEY_10CHANNELSUP] = "10ChannelsUp", [KEY_3D_MODE] = "3DMode", [KEY_ADDRESSBOOK] = "Addressbook", [KEY_ALS_TOGGLE] = "ALSToggle", [KEY_ASPECT_RATIO] = "AspectRatio", [KEY_ATTENDANT_OFF] = "AttendantOff", [KEY_ATTENDANT_ON] = "AttendantOn", [KEY_ATTENDANT_TOGGLE] = "AttendantToggle", [KEY_AUDIO_DESC] = "AudioDesc", [KEY_AUTOPILOT_ENGAGE_TOGGLE] = "AutoPiloteEngage", [KEY_BATTERY] = "Battery", [KEY_BLUETOOTH] = "BlueTooth", [KEY_BRIGHTNESS_CYCLE] = "BrightnessCycle", [KEY_BRIGHTNESS_MENU] = "BrightnessMenu", [KEY_BRL_DOT1] = "BrlDot1", [KEY_BRL_DOT10] = "BrlDot10", [KEY_BRL_DOT2] = "BrlDot2", [KEY_BRL_DOT3] = "BrlDot3", [KEY_BRL_DOT4] = "BrlDot4", [KEY_BRL_DOT5] = "BrlDot5", [KEY_BRL_DOT6] = "BrlDot6", [KEY_BRL_DOT7] = "BrlDot7", [KEY_BRL_DOT8] = "BrlDot8", [KEY_BRL_DOT9] = "BrlDot9", [KEY_CAMERA_DOWN] = "CameraDown", [KEY_CAMERA_FOCUS] = "CameraFocus", [KEY_CAMERA_LEFT] = "CameraLeft", [KEY_CAMERA_RIGHT] = "CameraRight", [KEY_CAMERA_UP] = "CameraUp", [KEY_CAMERA_ZOOMIN] = "CameraZoomIn", [KEY_CAMERA_ZOOMOUT] = "CameraZoomOut", [KEY_CLEARVU_SONAR] = "ClearVUSonar", [KEY_CONTEXT_MENU] = "ContextMenu", [KEY_DATA] = "Data", [KEY_DATABASE] = "DataBase", [KEY_DISPLAY_OFF] = "DisplayOff", [KEY_DISPLAYTOGGLE] = "DisplayToggle", [KEY_DOLLAR] = "Dollar", [KEY_DUAL_RANGE_RADAR] = "DualRangeRadat", [KEY_EDITOR] = "Editor", [KEY_EURO] = "Euro", [KEY_FASTREVERSE] = "FastReverse", [KEY_FISHING_CHART] = "FishingChart", [KEY_FN_RIGHT_SHIFT] = "FnRightShift", [KEY_FRAMEBACK] = "FrameBack", [KEY_FRAMEFORWARD] = "FrameForward", [KEY_FULL_SCREEN] = "FullScreen", [KEY_GAMES] = "Games", [KEY_GRAPHICSEDITOR] = "GraphicsEditor", [KEY_HANGUP_PHONE] = "HangUpPhone", [KEY_IMAGES] = "Images", [KEY_KBD_LCD_MENU1] = "KbdLcdMenu1", [KEY_KBD_LCD_MENU2] = "KbdLcdMenu2", [KEY_KBD_LCD_MENU3] = "KbdLcdMenu3", [KEY_KBD_LCD_MENU4] = "KbdLcdMenu4", [KEY_KBD_LCD_MENU5] = "KbdLcdMenu5", [KEY_LEFT_DOWN] = "LeftDown", [KEY_LEFT_UP] = "LeftUp", [KEY_LIGHTS_TOGGLE] = "LightToggle", [KEY_MACRO_PRESET1] = "MacroPreset1", 
[KEY_MACRO_PRESET2] = "MacroPreset2", [KEY_MACRO_PRESET3] = "MacroPrest3", [KEY_MACRO_PRESET_CYCLE] = "MacroPresetCycle", [KEY_MACRO_RECORD_START] = "MacroRecordStart", [KEY_MACRO_RECORD_STOP] = "MacroRecordStop", [KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat", [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger", [KEY_NAV_CHART] = "NavChar", [KEY_NAV_INFO] = "NavInfo", [KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement", [KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter", [KEY_NUMERIC_0] = "Numeric0", [KEY_NUMERIC_1] = "Numeric1", [KEY_NUMERIC_11] = "Numceric11", [KEY_NUMERIC_12] = "Numeric12", [KEY_NUMERIC_2] = "Numeric2", [KEY_NUMERIC_3] = "Numeric3", [KEY_NUMERIC_4] = "Numeric4", [KEY_NUMERIC_5] = "Numeric5", [KEY_NUMERIC_6] = "Numeric6", [KEY_NUMERIC_7] = "Numeric7", [KEY_NUMERIC_8] = "Numeric8", [KEY_NUMERIC_9] = "Numeric9", [KEY_NUMERIC_A] = "NumericA", [KEY_NUMERIC_B] = "NumericB", [KEY_NUMERIC_C] = "NumericC", [KEY_NUMERIC_D] = "NumericD", [KEY_NUMERIC_POUND] = "NumericPound", [KEY_NUMERIC_STAR] = "NumericStar", [KEY_ONSCREEN_KEYBOARD] = "OnScreenKeyBoard", [KEY_PAUSE_RECORD] = "PauseRecord", [KEY_PICKUP_PHONE] = "PickUpPhone", [KEY_PRESENTATION] = "Presentation", [KEY_PREVIOUS_ELEMENT] = "PreviousElement", [KEY_PRIVACY_SCREEN_TOGGLE] = "PrivacyScreenToggle", [KEY_RADAR_OVERLAY] = "RadarOverLay", [KEY_RFKILL] = "RFKill", [KEY_RIGHT_DOWN] = "RightDown", [KEY_RIGHT_UP] = "RightUp", [KEY_ROOT_MENU] = "RootMenu", [KEY_ROTATE_LOCK_TOGGLE] = "RotateLockToggle", [KEY_SCALE] = "Scale", [KEY_SELECTIVE_SCREENSHOT] = "SelectiveScreenshot", [KEY_SIDEVU_SONAR] = "SideVUSonar", [KEY_SINGLE_RANGE_RADAR] = "SingleRangeRadar", [KEY_SLOWREVERSE] = "SlowReverse", [KEY_SOS] = "SOS", [KEY_SPREADSHEET] = "SpreadSheet", [KEY_STOP_RECORD] = "StopRecord", [KEY_TOUCHPAD_OFF] = "TouchPadOff", [KEY_TOUCHPAD_ON] = "TouchPadOn", [KEY_TOUCHPAD_TOGGLE] = "TouchPadToggle", [KEY_TRADITIONAL_SONAR] = "TraditionalSonar", [KEY_UNMUTE] = "Unmute", [KEY_UWB] = "UWB", [KEY_VIDEO_NEXT] = "VideoNext", [KEY_VIDEOPHONE] = "VideoPhone", [KEY_VIDEO_PREV] = "VideoPrev", [KEY_VOD] = "VOD", [KEY_VOICEMAIL] = "VoiceMail", [KEY_WLAN] = "WLAN", [KEY_WORDPROCESSOR] = "WordProcessor", [KEY_WPS_BUTTON] = "WPSButton", [KEY_WWAN] = "WWAN", [KEY_ZOOMIN] = "ZoomIn", [KEY_ZOOMOUT] = "ZoomOut", [KEY_ZOOMRESET] = "ZoomReset", }; static const char *relatives[REL_MAX + 1] = { [REL_X] = "X", [REL_Y] = "Y", [REL_Z] = "Z", [REL_RX] = "Rx", [REL_RY] = "Ry", [REL_RZ] = "Rz", [REL_HWHEEL] = "HWheel", [REL_DIAL] = "Dial", [REL_WHEEL] = "Wheel", [REL_MISC] = "Misc", [REL_WHEEL_HI_RES] = "WheelHiRes", [REL_HWHEEL_HI_RES] = "HWheelHiRes" }; static const char *absolutes[ABS_CNT] = { [ABS_X] = "X", [ABS_Y] = "Y", [ABS_Z] = "Z", [ABS_RX] = "Rx", [ABS_RY] = "Ry", [ABS_RZ] = "Rz", [ABS_THROTTLE] = "Throttle", [ABS_RUDDER] = "Rudder", [ABS_WHEEL] = "Wheel", [ABS_GAS] = "Gas", [ABS_BRAKE] = "Brake", [ABS_HAT0X] = "Hat0X", [ABS_HAT0Y] = "Hat0Y", [ABS_HAT1X] = "Hat1X", [ABS_HAT1Y] = "Hat1Y", [ABS_HAT2X] = "Hat2X", [ABS_HAT2Y] = "Hat2Y", [ABS_HAT3X] = "Hat3X", [ABS_HAT3Y] = "Hat 3Y", [ABS_PRESSURE] = "Pressure", [ABS_DISTANCE] = "Distance", [ABS_TILT_X] = "XTilt", [ABS_TILT_Y] = "YTilt", [ABS_TOOL_WIDTH] = "ToolWidth", [ABS_VOLUME] = "Volume", [ABS_PROFILE] = "Profile", [ABS_MISC] = "Misc", [ABS_MT_SLOT] = "MTSlot", [ABS_MT_TOUCH_MAJOR] = "MTMajor", [ABS_MT_TOUCH_MINOR] = "MTMinor", [ABS_MT_WIDTH_MAJOR] = "MTMajorW", [ABS_MT_WIDTH_MINOR] = "MTMinorW", [ABS_MT_ORIENTATION] = 
"MTOrientation", [ABS_MT_POSITION_X] = "MTPositionX", [ABS_MT_POSITION_Y] = "MTPositionY", [ABS_MT_TOOL_TYPE] = "MTToolType", [ABS_MT_BLOB_ID] = "MTBlobID", [ABS_MT_TRACKING_ID] = "MTTrackingID", [ABS_MT_PRESSURE] = "MTPressure", [ABS_MT_DISTANCE] = "MTDistance", [ABS_MT_TOOL_X] = "MTToolX", [ABS_MT_TOOL_Y] = "MTToolY", }; static const char *misc[MSC_MAX + 1] = { [MSC_SERIAL] = "Serial", [MSC_PULSELED] = "Pulseled", [MSC_GESTURE] = "Gesture", [MSC_RAW] = "RawData", [MSC_SCAN] = "Scan", [MSC_TIMESTAMP] = "TimeStamp", }; static const char *leds[LED_MAX + 1] = { [LED_NUML] = "NumLock", [LED_CAPSL] = "CapsLock", [LED_SCROLLL] = "ScrollLock", [LED_COMPOSE] = "Compose", [LED_KANA] = "Kana", [LED_SLEEP] = "Sleep", [LED_SUSPEND] = "Suspend", [LED_MUTE] = "Mute", [LED_MISC] = "Misc", [LED_MAIL] = "Mail", [LED_CHARGING] = "Charging", }; static const char *repeats[REP_MAX + 1] = { [REP_DELAY] = "Delay", [REP_PERIOD] = "Period" }; static const char *sounds[SND_MAX + 1] = { [SND_CLICK] = "Click", [SND_BELL] = "Bell", [SND_TONE] = "Tone" }; static const char *software[SW_CNT] = { [SW_LID] = "Lid", [SW_TABLET_MODE] = "TabletMode", [SW_HEADPHONE_INSERT] = "HeadPhoneInsert", [SW_RFKILL_ALL] = "RFKillAll", [SW_MICROPHONE_INSERT] = "MicrophoneInsert", [SW_DOCK] = "Dock", [SW_LINEOUT_INSERT] = "LineOutInsert", [SW_JACK_PHYSICAL_INSERT] = "JackPhysicalInsert", [SW_VIDEOOUT_INSERT] = "VideoOutInsert", [SW_CAMERA_LENS_COVER] = "CameraLensCover", [SW_KEYPAD_SLIDE] = "KeyPadSlide", [SW_FRONT_PROXIMITY] = "FrontProximity", [SW_ROTATE_LOCK] = "RotateLock", [SW_LINEIN_INSERT] = "LineInInsert", [SW_MUTE_DEVICE] = "MuteDevice", [SW_PEN_INSERTED] = "PenInserted", [SW_MACHINE_COVER] = "MachineCover", }; static const char *force[FF_CNT] = { [FF_RUMBLE] = "FF_RUMBLE", [FF_PERIODIC] = "FF_PERIODIC", [FF_CONSTANT] = "FF_CONSTANT", [FF_SPRING] = "FF_SPRING", [FF_FRICTION] = "FF_FRICTION", [FF_DAMPER] = "FF_DAMPER", [FF_INERTIA] = "FF_INERTIA", [FF_RAMP] = "FF_RAMP", [FF_SQUARE] = "FF_SQUARE", [FF_TRIANGLE] = "FF_TRIANGLE", [FF_SINE] = "FF_SINE", [FF_SAW_UP] = "FF_SAW_UP", [FF_SAW_DOWN] = "FF_SAW_DOWN", [FF_CUSTOM] = "FF_CUSTOM", [FF_GAIN] = "FF_GAIN", [FF_AUTOCENTER] = "FF_AUTOCENTER", [FF_MAX] = "FF_MAX", }; static const char *force_status[FF_STATUS_MAX + 1] = { [FF_STATUS_STOPPED] = "FF_STATUS_STOPPED", [FF_STATUS_PLAYING] = "FF_STATUS_PLAYING", }; static const char **names[EV_MAX + 1] = { [EV_SYN] = syncs, [EV_KEY] = keys, [EV_REL] = relatives, [EV_ABS] = absolutes, [EV_MSC] = misc, [EV_LED] = leds, [EV_SND] = sounds, [EV_REP] = repeats, [EV_SW] = software, [EV_FF] = force, [EV_FF_STATUS] = force_status, }; static void hid_resolv_event(__u8 type, __u16 code, struct seq_file *f) { if (events[type]) seq_printf(f, "%s.", events[type]); else seq_printf(f, "%02x.", type); if (names[type] && names[type][code]) seq_printf(f, "%s", names[type][code]); else seq_printf(f, "%04x", code); } static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) { int i, j, k; struct hid_report *report; struct hid_usage *usage; for (k = HID_INPUT_REPORT; k <= HID_OUTPUT_REPORT; k++) { list_for_each_entry(report, &hid->report_enum[k].report_list, list) { for (i = 0; i < report->maxfield; i++) { for ( j = 0; j < report->field[i]->maxusage; j++) { usage = report->field[i]->usage + j; hid_resolv_usage(usage->hid, f); seq_printf(f, " ---> "); hid_resolv_event(usage->type, usage->code, f); seq_printf(f, "\n"); } } } } } static int hid_debug_rdesc_show(struct seq_file *f, void *p) { struct hid_device *hdev = f->private; const __u8 
*rdesc = hdev->rdesc; unsigned rsize = hdev->rsize; int i; if (!rdesc) { rdesc = hdev->dev_rdesc; rsize = hdev->dev_rsize; } /* dump HID report descriptor */ for (i = 0; i < rsize; i++) seq_printf(f, "%02x ", rdesc[i]); seq_printf(f, "\n\n"); /* dump parsed data and input mappings */ if (down_interruptible(&hdev->driver_input_lock)) return 0; hid_dump_device(hdev, f); seq_printf(f, "\n"); hid_dump_input_mapping(hdev, f); up(&hdev->driver_input_lock); return 0; } static int hid_debug_events_open(struct inode *inode, struct file *file) { int err = 0; struct hid_debug_list *list; unsigned long flags; if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) { err = -ENOMEM; goto out; } err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); if (err) { kfree(list); goto out; } list->hdev = (struct hid_device *) inode->i_private; kref_get(&list->hdev->ref); file->private_data = list; mutex_init(&list->read_mutex); spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_add_tail(&list->node, &list->hdev->debug_list); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); out: return err; } static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { struct hid_debug_list *list = file->private_data; int ret = 0, copied; DECLARE_WAITQUEUE(wait, current); mutex_lock(&list->read_mutex); if (kfifo_is_empty(&list->hid_debug_fifo)) { add_wait_queue(&list->hdev->debug_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); while (kfifo_is_empty(&list->hid_debug_fifo)) { if (signal_pending(current)) { ret = -ERESTARTSYS; break; } /* if list->hdev is NULL we cannot remove_wait_queue(). * if list->hdev->debug is 0 then hid_debug_unregister() * was already called and list->hdev is being destroyed. * if we add remove_wait_queue() here we can hit a race. 
*/ if (!list->hdev || !list->hdev->debug) { ret = -EIO; __set_current_state(TASK_RUNNING); goto out; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } /* allow O_NONBLOCK from other threads */ mutex_unlock(&list->read_mutex); schedule(); mutex_lock(&list->read_mutex); set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); remove_wait_queue(&list->hdev->debug_wait, &wait); if (ret) goto out; } /* pass the fifo content to userspace, locking is not needed with only * one concurrent reader and one concurrent writer */ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); if (ret) goto out; ret = copied; out: mutex_unlock(&list->read_mutex); return ret; } static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait) { struct hid_debug_list *list = file->private_data; poll_wait(file, &list->hdev->debug_wait, wait); if (!kfifo_is_empty(&list->hid_debug_fifo)) return EPOLLIN | EPOLLRDNORM; if (!list->hdev->debug) return EPOLLERR | EPOLLHUP; return 0; } static int hid_debug_events_release(struct inode *inode, struct file *file) { struct hid_debug_list *list = file->private_data; unsigned long flags; spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_del(&list->node); spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); kfifo_free(&list->hid_debug_fifo); kref_put(&list->hdev->ref, hiddev_free); kfree(list); return 0; } DEFINE_SHOW_ATTRIBUTE(hid_debug_rdesc); static const struct file_operations hid_debug_events_fops = { .owner = THIS_MODULE, .open = hid_debug_events_open, .read = hid_debug_events_read, .poll = hid_debug_events_poll, .release = hid_debug_events_release, .llseek = noop_llseek, }; void hid_debug_register(struct hid_device *hdev, const char *name) { hdev->debug_dir = debugfs_create_dir(name, hid_debug_root); hdev->debug_rdesc = debugfs_create_file("rdesc", 0400, hdev->debug_dir, hdev, &hid_debug_rdesc_fops); hdev->debug_events = debugfs_create_file("events", 0400, hdev->debug_dir, hdev, &hid_debug_events_fops); hdev->debug = 1; } void hid_debug_unregister(struct hid_device *hdev) { hdev->debug = 0; wake_up_interruptible(&hdev->debug_wait); debugfs_remove(hdev->debug_rdesc); debugfs_remove(hdev->debug_events); debugfs_remove(hdev->debug_dir); } void hid_debug_init(void) { hid_debug_root = debugfs_create_dir("hid", NULL); } void hid_debug_exit(void) { debugfs_remove_recursive(hid_debug_root); } |
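/*
 * Illustrative sketch only (not part of hid-debug.c): one way user space
 * might consume the "events" node that hid_debug_register() creates above.
 * It assumes debugfs is mounted at /sys/kernel/debug and uses a made-up
 * device directory name; list /sys/kernel/debug/hid/ to find the real one.
 * Guarded by #if 0 so it is never built with the kernel.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical device directory; the real name comes from the device */
	const char *path = "/sys/kernel/debug/hid/0003:046D:C077.0001/events";
	char buf[512];
	ssize_t n;
	int fd = open(path, O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0)
		return 1;

	/*
	 * hid_debug_events_poll() signals POLLIN once the per-reader kfifo
	 * that hid_debug_event() fills has data queued.
	 */
	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		fputs(buf, stdout);	/* text emitted by hid_dump_input() etc. */
	}

	close(fd);
	return 0;
}
#endif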
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_FILELOCK_H
#define _LINUX_FILELOCK_H

#include <linux/fs.h>

#define FL_POSIX	1
#define FL_FLOCK	2
#define FL_DELEG	4	/* NFSv4 delegation */
#define FL_ACCESS	8	/* not trying to lock, just looking */
#define FL_EXISTS	16	/* when unlocking, test for existence */
#define FL_LEASE	32	/* lease held on this file */
#define FL_CLOSE	64	/* unlock on close */
#define FL_SLEEP	128	/* A blocking lock */
#define FL_DOWNGRADE_PENDING	256	/* Lease is being downgraded */
#define FL_UNLOCK_PENDING	512	/* Lease is being broken */
#define FL_OFDLCK	1024	/* lock is "owned" by struct file */
#define FL_LAYOUT	2048	/* outstanding pNFS layout */
#define FL_RECLAIM	4096	/* reclaiming from a reboot server */

#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)

/*
 * Special return value from posix_lock_file() and vfs_lock_file() for
 * asynchronous locking.
*/ #define FILE_LOCK_DEFERRED 1 struct file_lock; struct file_lease; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); void (*fl_release_private)(struct file_lock *); }; struct lock_manager_operations { void *lm_mod_owner; fl_owner_t (*lm_get_owner)(fl_owner_t); void (*lm_put_owner)(fl_owner_t); void (*lm_notify)(struct file_lock *); /* unblock callback */ int (*lm_grant)(struct file_lock *, int); bool (*lm_lock_expirable)(struct file_lock *cfl); void (*lm_expire_lock)(void); }; struct lease_manager_operations { bool (*lm_break)(struct file_lease *); int (*lm_change)(struct file_lease *, int, struct list_head *); void (*lm_setup)(struct file_lease *, void **); bool (*lm_breaker_owns_lease)(struct file_lease *); }; struct lock_manager { struct list_head list; /* * NFSv4 and up also want opens blocked during the grace period; * NLM doesn't care: */ bool block_opens; }; struct net; void locks_start_grace(struct net *, struct lock_manager *); void locks_end_grace(struct lock_manager *); bool locks_in_grace(struct net *); bool opens_in_grace(struct net *); /* * struct file_lock has a union that some filesystems use to track * their own private info. The NFS side of things is defined here: */ #include <linux/nfs_fs_i.h> /* * struct file_lock represents a generic "file lock". It's used to represent * POSIX byte range locks, BSD (flock) locks, and leases. It's important to * note that the same struct is used to represent both a request for a lock and * the lock itself, but the same object is never used for both. * * FIXME: should we create a separate "struct lock_request" to help distinguish * these two uses? * * The varous i_flctx lists are ordered by: * * 1) lock owner * 2) lock range start * 3) lock range end * * Obviously, the last two criteria only matter for POSIX locks. */ struct file_lock_core { struct file_lock_core *flc_blocker; /* The lock that is blocking us */ struct list_head flc_list; /* link into file_lock_context */ struct hlist_node flc_link; /* node in global lists */ struct list_head flc_blocked_requests; /* list of requests with * ->fl_blocker pointing here */ struct list_head flc_blocked_member; /* node in * ->fl_blocker->fl_blocked_requests */ fl_owner_t flc_owner; unsigned int flc_flags; unsigned char flc_type; pid_t flc_pid; int flc_link_cpu; /* what cpu's list is this on? 
*/ wait_queue_head_t flc_wait; struct file *flc_file; }; struct file_lock { struct file_lock_core c; loff_t fl_start; loff_t fl_end; const struct file_lock_operations *fl_ops; /* Callbacks for filesystems */ const struct lock_manager_operations *fl_lmops; /* Callbacks for lockmanagers */ union { struct nfs_lock_info nfs_fl; struct nfs4_lock_info nfs4_fl; struct { struct list_head link; /* link in AFS vnode's pending_locks list */ int state; /* state of grant or error if -ve */ unsigned int debug_id; } afs; struct { struct inode *inode; } ceph; } fl_u; } __randomize_layout; struct file_lease { struct file_lock_core c; struct fasync_struct * fl_fasync; /* for lease break notifications */ /* for lease breaks: */ unsigned long fl_break_time; unsigned long fl_downgrade_time; const struct lease_manager_operations *fl_lmops; /* Callbacks for lease managers */ } __randomize_layout; struct file_lock_context { spinlock_t flc_lock; struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; }; #ifdef CONFIG_FILE_LOCKING int fcntl_getlk(struct file *, unsigned int, struct flock *); int fcntl_setlk(unsigned int, struct file *, unsigned int, struct flock *); #if BITS_PER_LONG == 32 int fcntl_getlk64(struct file *, unsigned int, struct flock64 *); int fcntl_setlk64(unsigned int, struct file *, unsigned int, struct flock64 *); #endif int fcntl_setlease(unsigned int fd, struct file *filp, int arg); int fcntl_getlease(struct file *filp); static inline bool lock_is_unlock(struct file_lock *fl) { return fl->c.flc_type == F_UNLCK; } static inline bool lock_is_read(struct file_lock *fl) { return fl->c.flc_type == F_RDLCK; } static inline bool lock_is_write(struct file_lock *fl) { return fl->c.flc_type == F_WRLCK; } static inline void locks_wake_up_waiter(struct file_lock_core *flc) { wake_up(&flc->flc_wait); } static inline void locks_wake_up(struct file_lock *fl) { locks_wake_up_waiter(&fl->c); } static inline bool locks_can_async_lock(const struct file_operations *fops) { return !fops->lock || fops->fop_flags & FOP_ASYNC_LOCK; } /* fs/locks.c */ void locks_free_lock_context(struct inode *inode); void locks_free_lock(struct file_lock *fl); void locks_init_lock(struct file_lock *); struct file_lock *locks_alloc_lock(void); void locks_copy_lock(struct file_lock *, struct file_lock *); void locks_copy_conflock(struct file_lock *, struct file_lock *); void locks_remove_posix(struct file *, fl_owner_t); void locks_remove_file(struct file *); void locks_release_private(struct file_lock *); void posix_test_lock(struct file *, struct file_lock *); int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); int locks_delete_block(struct file_lock *); int vfs_test_lock(struct file *, struct file_lock *); int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); int vfs_cancel_lock(struct file *filp, struct file_lock *fl); bool vfs_inode_has_locks(struct inode *inode); int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl); void locks_init_lease(struct file_lease *); void locks_free_lease(struct file_lease *fl); struct file_lease *locks_alloc_lease(void); int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); void lease_get_mtime(struct inode *, struct timespec64 *time); int generic_setlease(struct file *, int, struct file_lease **, void **priv); int kernel_setlease(struct file *, int, struct file_lease **, void **); int vfs_setlease(struct file *, int, struct file_lease **, void **); int lease_modify(struct 
file_lease *, int, struct list_head *); struct notifier_block; int lease_register_notifier(struct notifier_block *); void lease_unregister_notifier(struct notifier_block *); struct files_struct; void show_fd_locks(struct seq_file *f, struct file *filp, struct files_struct *files); bool locks_owner_has_blockers(struct file_lock_context *flctx, fl_owner_t owner); static inline struct file_lock_context * locks_inode_context(const struct inode *inode) { return smp_load_acquire(&inode->i_flctx); } #else /* !CONFIG_FILE_LOCKING */ static inline int fcntl_getlk(struct file *file, unsigned int cmd, struct flock __user *user) { return -EINVAL; } static inline int fcntl_setlk(unsigned int fd, struct file *file, unsigned int cmd, struct flock __user *user) { return -EACCES; } #if BITS_PER_LONG == 32 static inline int fcntl_getlk64(struct file *file, unsigned int cmd, struct flock64 *user) { return -EINVAL; } static inline int fcntl_setlk64(unsigned int fd, struct file *file, unsigned int cmd, struct flock64 *user) { return -EACCES; } #endif static inline int fcntl_setlease(unsigned int fd, struct file *filp, int arg) { return -EINVAL; } static inline int fcntl_getlease(struct file *filp) { return F_UNLCK; } static inline bool lock_is_unlock(struct file_lock *fl) { return false; } static inline bool lock_is_read(struct file_lock *fl) { return false; } static inline bool lock_is_write(struct file_lock *fl) { return false; } static inline void locks_wake_up(struct file_lock *fl) { } static inline void locks_free_lock_context(struct inode *inode) { } static inline void locks_init_lock(struct file_lock *fl) { return; } static inline void locks_init_lease(struct file_lease *fl) { return; } static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) { return; } static inline void locks_copy_lock(struct file_lock *new, struct file_lock *fl) { return; } static inline void locks_remove_posix(struct file *filp, fl_owner_t owner) { return; } static inline void locks_remove_file(struct file *filp) { return; } static inline void posix_test_lock(struct file *filp, struct file_lock *fl) { return; } static inline int posix_lock_file(struct file *filp, struct file_lock *fl, struct file_lock *conflock) { return -ENOLCK; } static inline int locks_delete_block(struct file_lock *waiter) { return -ENOENT; } static inline int vfs_test_lock(struct file *filp, struct file_lock *fl) { return 0; } static inline int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) { return -ENOLCK; } static inline int vfs_cancel_lock(struct file *filp, struct file_lock *fl) { return 0; } static inline bool vfs_inode_has_locks(struct inode *inode) { return false; } static inline int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) { return -ENOLCK; } static inline int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) { return 0; } static inline void lease_get_mtime(struct inode *inode, struct timespec64 *time) { return; } static inline int generic_setlease(struct file *filp, int arg, struct file_lease **flp, void **priv) { return -EINVAL; } static inline int kernel_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv) { return -EINVAL; } static inline int vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv) { return -EINVAL; } static inline int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose) { return -EINVAL; } struct files_struct; static inline void 
show_fd_locks(struct seq_file *f, struct file *filp, struct files_struct *files) {} static inline bool locks_owner_has_blockers(struct file_lock_context *flctx, fl_owner_t owner) { return false; } static inline struct file_lock_context * locks_inode_context(const struct inode *inode) { return NULL; } #endif /* !CONFIG_FILE_LOCKING */ /* for walking lists of file_locks linked by fl_list */ #define for_each_file_lock(_fl, _head) list_for_each_entry(_fl, _head, c.flc_list) static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) { return locks_lock_inode_wait(file_inode(filp), fl); } #ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { struct file_lock_context *flctx; /* * Since this check is lockless, we must ensure that any refcounts * taken are done before checking i_flctx->flc_lease. Otherwise, we * could end up racing with tasks trying to set a new lease on this * file. */ flctx = READ_ONCE(inode->i_flctx); if (!flctx) return 0; smp_mb(); if (!list_empty_careful(&flctx->flc_lease)) return __break_lease(inode, mode, FL_LEASE); return 0; } static inline int break_deleg(struct inode *inode, unsigned int mode) { struct file_lock_context *flctx; /* * Since this check is lockless, we must ensure that any refcounts * taken are done before checking i_flctx->flc_lease. Otherwise, we * could end up racing with tasks trying to set a new lease on this * file. */ flctx = READ_ONCE(inode->i_flctx); if (!flctx) return 0; smp_mb(); if (!list_empty_careful(&flctx->flc_lease)) return __break_lease(inode, mode, FL_DELEG); return 0; } static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode) { int ret; ret = break_deleg(inode, O_WRONLY|O_NONBLOCK); if (ret == -EWOULDBLOCK && delegated_inode) { *delegated_inode = inode; ihold(inode); } return ret; } static inline int break_deleg_wait(struct inode **delegated_inode) { int ret; ret = break_deleg(*delegated_inode, O_WRONLY); iput(*delegated_inode); *delegated_inode = NULL; return ret; } static inline int break_layout(struct inode *inode, bool wait) { smp_mb(); if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) return __break_lease(inode, wait ? O_WRONLY : O_WRONLY | O_NONBLOCK, FL_LAYOUT); return 0; } #else /* !CONFIG_FILE_LOCKING */ static inline int break_lease(struct inode *inode, unsigned int mode) { return 0; } static inline int break_deleg(struct inode *inode, unsigned int mode) { return 0; } static inline int try_break_deleg(struct inode *inode, struct inode **delegated_inode) { return 0; } static inline int break_deleg_wait(struct inode **delegated_inode) { BUG(); return 0; } static inline int break_layout(struct inode *inode, bool wait) { return 0; } #endif /* CONFIG_FILE_LOCKING */ #endif /* _LINUX_FILELOCK_H */ |
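/*
 * Illustrative sketch only (not part of this header): how the lease-break
 * helpers declared above are typically used on an open-style path. The
 * function example_open_path() is hypothetical; the real callers live in
 * fs/open.c and in individual filesystems. Guarded by #if 0 so it is never
 * compiled.
 */
#if 0
static int example_open_path(struct inode *inode, fmode_t mode)
{
	/*
	 * A conflicting open must first break any outstanding lease.
	 * break_lease() returns 0 when no conflicting lease exists and
	 * otherwise waits for the lease holder (callers that cannot sleep
	 * pass O_NONBLOCK and handle -EWOULDBLOCK instead).
	 */
	int err = break_lease(inode, (mode & FMODE_WRITE) ? O_WRONLY : O_RDONLY);

	if (err)
		return err;

	/* ... continue with the open once the lease is resolved ... */
	return 0;
}
#endif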
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <linux/in6.h>
#include <net/ip.h>

int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
				struct netlink_ext_ack *extack)
{
	*ip_proto = nla_get_u8(attr);

	switch (*ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		return 0;
	case IPPROTO_ICMP:
		if (family != AF_INET)
			break;
		return 0;
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ICMPV6:
		if (family != AF_INET6)
			break;
		return 0;
#endif
	}
	NL_SET_ERR_MSG(extack, "Unsupported ip proto");
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
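/*
 * Hedged caller sketch, not part of this file: how an rtnl ->getroute
 * handler might use rtm_getroute_parse_ip_proto() to validate an
 * RTA_IP_PROTO attribute, along the lines of what the IPv4 getroute path
 * does. 'tb' is assumed to be an already-parsed attribute table and the
 * IPPROTO_UDP default is illustrative.
 */
static int example_parse_ip_proto(struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	u8 ip_proto = IPPROTO_UDP;	/* assumed default when the attribute is absent */
	int err;

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	/* ip_proto now holds one of the protocols accepted by the helper */
	return 0;
}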
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) International Business Machines Corp., 2006 * * Author: Artem Bityutskiy (Битюцкий Артём) */ /* * UBI attaching sub-system. * * This sub-system is responsible for attaching MTD devices and it also * implements flash media scanning. * * The attaching information is represented by a &struct ubi_attach_info * object. Information about volumes is represented by &struct ubi_ainf_volume * objects which are kept in a volume RB-tree with its root at the @volumes field. * The RB-tree is indexed by the volume ID. * * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These * objects are kept in per-volume RB-trees with the root at the corresponding * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of * per-volume objects and each of these objects is the root of an RB-tree of * per-LEB objects. * * Corrupted physical eraseblocks are put to the @corr list, free physical * eraseblocks are put to the @free list, and physical eraseblocks to be * erased are put to the @erase list. * * About corruptions * ~~~~~~~~~~~~~~~~~ * * UBI protects EC and VID headers with CRC-32 checksums, so it can detect * whether the headers are corrupted or not. Sometimes UBI also protects the * data with CRC-32, e.g., when it executes the atomic LEB change operation, or * when it moves the contents of a PEB for wear-leveling purposes. * * UBI tries to distinguish between 2 types of corruptions. * * 1. Corruptions caused by power cuts. These are expected corruptions and UBI * tries to handle them gracefully, without printing too many warnings and * error messages. The idea is that we do not lose important data in these * cases - we may lose only the data which were being written to the media just * before the power cut happened, and the upper layers (e.g., UBIFS) are * supposed to handle such data losses (e.g., by using the FS journal).
* * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like * the reason is a power cut, UBI puts this PEB to the @erase list, and all * PEBs in the @erase list are scheduled for erasure later. * * 2. Unexpected corruptions which are not caused by power cuts. During * attaching, such PEBs are put to the @corr list and UBI preserves them. * Obviously, this lessens the amount of available PEBs, and if at some point * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs * about such PEBs every time the MTD device is attached. * * However, it is difficult to reliably distinguish between these types of * corruptions, and UBI's strategy is as follows (in case of attaching by * scanning). UBI assumes corruption type 2 if the VID header is corrupted and * the data area does not contain all 0xFFs, and there were no bit-flips or * integrity errors (e.g., ECC errors in case of NAND) while reading the data * area. Otherwise UBI assumes corruption type 1. So the decision criteria * are as follows. * o If the data area contains only 0xFFs, there is no data, and it is safe * to just erase this PEB - this is corruption type 1. * o If the data area has bit-flips or data integrity errors (ECC errors on * NAND), it is probably a PEB which was being erased when a power cut * happened, so this is corruption type 1. However, this is just a guess, * which might be wrong. * o Otherwise this is corruption type 2. */ #include <linux/err.h> #include <linux/slab.h> #include <linux/crc32.h> #include <linux/math64.h> #include <linux/random.h> #include "ubi.h" static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai); #define AV_FIND BIT(0) #define AV_ADD BIT(1) #define AV_FIND_OR_ADD (AV_FIND | AV_ADD) /** * find_or_add_av - internal function to find a volume, add a volume, or do * both (find, and add if missing). * @ai: attaching information * @vol_id: the requested volume ID * @flags: a combination of the %AV_FIND and %AV_ADD flags describing the * expected operation. If only %AV_ADD is set, -EEXIST is returned * if the volume already exists. If only %AV_FIND is set, NULL is * returned if the volume does not exist. And if both flags are * set, the helper first tries to find an existing volume, and if * it does not exist it creates a new one. * @created: out value used to inform the caller whether it's a newly created * volume or not. * * This function returns a pointer to a volume description or an ERR_PTR if * the operation failed. It can also return NULL if only %AV_FIND is set and * the volume does not exist.
*/ static struct ubi_ainf_volume *find_or_add_av(struct ubi_attach_info *ai, int vol_id, unsigned int flags, bool *created) { struct ubi_ainf_volume *av; struct rb_node **p = &ai->volumes.rb_node, *parent = NULL; /* Walk the volume RB-tree to look if this volume is already present */ while (*p) { parent = *p; av = rb_entry(parent, struct ubi_ainf_volume, rb); if (vol_id == av->vol_id) { *created = false; if (!(flags & AV_FIND)) return ERR_PTR(-EEXIST); return av; } if (vol_id > av->vol_id) p = &(*p)->rb_left; else p = &(*p)->rb_right; } if (!(flags & AV_ADD)) return NULL; /* The volume is absent - add it */ av = kzalloc(sizeof(*av), GFP_KERNEL); if (!av) return ERR_PTR(-ENOMEM); av->vol_id = vol_id; if (vol_id > ai->highest_vol_id) ai->highest_vol_id = vol_id; rb_link_node(&av->rb, parent, p); rb_insert_color(&av->rb, &ai->volumes); ai->vols_found += 1; *created = true; dbg_bld("added volume %d", vol_id); return av; } /** * ubi_find_or_add_av - search for a volume in the attaching information and * add one if it does not exist. * @ai: attaching information * @vol_id: the requested volume ID * @created: whether the volume has been created or not * * This function returns a pointer to the new volume description or an * ERR_PTR if the operation failed. */ static struct ubi_ainf_volume *ubi_find_or_add_av(struct ubi_attach_info *ai, int vol_id, bool *created) { return find_or_add_av(ai, vol_id, AV_FIND_OR_ADD, created); } /** * ubi_alloc_aeb - allocate an aeb element * @ai: attaching information * @pnum: physical eraseblock number * @ec: erase counter of the physical eraseblock * * Allocate an aeb object and initialize the pnum and ec information. * vol_id and lnum are set to UBI_UNKNOWN, and the other fields are * initialized to zero. * Note that the element is not added in any list or RB tree. */ struct ubi_ainf_peb *ubi_alloc_aeb(struct ubi_attach_info *ai, int pnum, int ec) { struct ubi_ainf_peb *aeb; aeb = kmem_cache_zalloc(ai->aeb_slab_cache, GFP_KERNEL); if (!aeb) return NULL; aeb->pnum = pnum; aeb->ec = ec; aeb->vol_id = UBI_UNKNOWN; aeb->lnum = UBI_UNKNOWN; return aeb; } /** * ubi_free_aeb - free an aeb element * @ai: attaching information * @aeb: the element to free * * Free an aeb object. The caller must have removed the element from any list * or RB tree. */ void ubi_free_aeb(struct ubi_attach_info *ai, struct ubi_ainf_peb *aeb) { kmem_cache_free(ai->aeb_slab_cache, aeb); } /** * add_to_list - add physical eraseblock to a list. * @ai: attaching information * @pnum: physical eraseblock number to add * @vol_id: the last used volume id for the PEB * @lnum: the last used LEB number for the PEB * @ec: erase counter of the physical eraseblock * @to_head: if not zero, add to the head of the list * @list: the list to add to * * This function allocates a 'struct ubi_ainf_peb' object for physical * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists. * It stores the @lnum and @vol_id alongside, which can both be * %UBI_UNKNOWN if they are not available, not readable, or not assigned. * If @to_head is not zero, PEB will be added to the head of the list, which * basically means it will be processed first later. E.g., we add corrupted * PEBs (corrupted due to power cuts) to the head of the erase list to make * sure we erase them first and get rid of corruptions ASAP. This function * returns zero in case of success and a negative error code in case of * failure. 
*/ static int add_to_list(struct ubi_attach_info *ai, int pnum, int vol_id, int lnum, int ec, int to_head, struct list_head *list) { struct ubi_ainf_peb *aeb; if (list == &ai->free) { dbg_bld("add to free: PEB %d, EC %d", pnum, ec); } else if (list == &ai->erase) { dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); } else if (list == &ai->alien) { dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); ai->alien_peb_count += 1; } else BUG(); aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM; aeb->vol_id = vol_id; aeb->lnum = lnum; if (to_head) list_add(&aeb->u.list, list); else list_add_tail(&aeb->u.list, list); return 0; } /** * add_corrupted - add a corrupted physical eraseblock. * @ai: attaching information * @pnum: physical eraseblock number to add * @ec: erase counter of the physical eraseblock * * This function allocates a 'struct ubi_ainf_peb' object for a corrupted * physical eraseblock @pnum and adds it to the 'corr' list. The corruption * was presumably not caused by a power cut. Returns zero in case of success * and a negative error code in case of failure. */ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec) { struct ubi_ainf_peb *aeb; dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM; ai->corr_peb_count += 1; list_add(&aeb->u.list, &ai->corr); return 0; } /** * add_fastmap - add a Fastmap related physical eraseblock. * @ai: attaching information * @pnum: physical eraseblock number the VID header came from * @vid_hdr: the volume identifier header * @ec: erase counter of the physical eraseblock * * This function allocates a 'struct ubi_ainf_peb' object for a Fastamp * physical eraseblock @pnum and adds it to the 'fastmap' list. * Such blocks can be Fastmap super and data blocks from both the most * recent Fastmap we're attaching from or from old Fastmaps which will * be erased. */ static int add_fastmap(struct ubi_attach_info *ai, int pnum, struct ubi_vid_hdr *vid_hdr, int ec) { struct ubi_ainf_peb *aeb; aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM; aeb->vol_id = be32_to_cpu(vid_hdr->vol_id); aeb->sqnum = be64_to_cpu(vid_hdr->sqnum); list_add(&aeb->u.list, &ai->fastmap); dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum, aeb->vol_id, aeb->sqnum); return 0; } /** * validate_vid_hdr - check volume identifier header. * @ubi: UBI device description object * @vid_hdr: the volume identifier header to check * @av: information about the volume this logical eraseblock belongs to * @pnum: physical eraseblock number the VID header came from * * This function checks that data stored in @vid_hdr is consistent. Returns * non-zero if an inconsistency was found and zero if not. * * Note, UBI does sanity check of everything it reads from the flash media. * Most of the checks are done in the I/O sub-system. Here we check that the * information in the VID header is consistent to the information in other VID * headers of the same volume. */ static int validate_vid_hdr(const struct ubi_device *ubi, const struct ubi_vid_hdr *vid_hdr, const struct ubi_ainf_volume *av, int pnum) { int vol_type = vid_hdr->vol_type; int vol_id = be32_to_cpu(vid_hdr->vol_id); int used_ebs = be32_to_cpu(vid_hdr->used_ebs); int data_pad = be32_to_cpu(vid_hdr->data_pad); if (av->leb_count != 0) { int av_vol_type; /* * This is not the first logical eraseblock belonging to this * volume. Ensure that the data in its VID header is consistent * to the data in previous logical eraseblock headers. 
*/ if (vol_id != av->vol_id) { ubi_err(ubi, "inconsistent vol_id"); goto bad; } if (av->vol_type == UBI_STATIC_VOLUME) av_vol_type = UBI_VID_STATIC; else av_vol_type = UBI_VID_DYNAMIC; if (vol_type != av_vol_type) { ubi_err(ubi, "inconsistent vol_type"); goto bad; } if (used_ebs != av->used_ebs) { ubi_err(ubi, "inconsistent used_ebs"); goto bad; } if (data_pad != av->data_pad) { ubi_err(ubi, "inconsistent data_pad"); goto bad; } } return 0; bad: ubi_err(ubi, "inconsistent VID header at PEB %d", pnum); ubi_dump_vid_hdr(vid_hdr); ubi_dump_av(av); return -EINVAL; } /** * add_volume - add volume to the attaching information. * @ai: attaching information * @vol_id: ID of the volume to add * @pnum: physical eraseblock number * @vid_hdr: volume identifier header * * If the volume corresponding to the @vid_hdr logical eraseblock is already * present in the attaching information, this function does nothing. Otherwise * it adds corresponding volume to the attaching information. Returns a pointer * to the allocated "av" object in case of success and a negative error code in * case of failure. */ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai, int vol_id, int pnum, const struct ubi_vid_hdr *vid_hdr) { struct ubi_ainf_volume *av; bool created; ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id)); av = ubi_find_or_add_av(ai, vol_id, &created); if (IS_ERR(av) || !created) return av; av->used_ebs = be32_to_cpu(vid_hdr->used_ebs); av->data_pad = be32_to_cpu(vid_hdr->data_pad); av->compat = vid_hdr->compat; av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; return av; } /** * ubi_compare_lebs - find out which logical eraseblock is newer. * @ubi: UBI device description object * @aeb: first logical eraseblock to compare * @pnum: physical eraseblock number of the second logical eraseblock to * compare * @vid_hdr: volume identifier header of the second logical eraseblock * * This function compares 2 copies of a LEB and informs which one is newer. In * case of success this function returns a positive value, in case of failure, a * negative error code is returned. The success return codes use the following * bits: * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the * second PEB (described by @pnum and @vid_hdr); * o bit 0 is set: the second PEB is newer; * o bit 1 is cleared: no bit-flips were detected in the newer LEB; * o bit 1 is set: bit-flips were detected in the newer LEB; * o bit 2 is cleared: the older LEB is not corrupted; * o bit 2 is set: the older LEB is corrupted. */ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb, int pnum, const struct ubi_vid_hdr *vid_hdr) { int len, err, second_is_newer, bitflips = 0, corrupted = 0; uint32_t data_crc, crc; struct ubi_vid_io_buf *vidb = NULL; unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); if (sqnum2 == aeb->sqnum) { /* * This must be a really ancient UBI image which has been * created before sequence numbers support has been added. At * that times we used 32-bit LEB versions stored in logical * eraseblocks. That was before UBI got into mainline. We do not * support these images anymore. Well, those images still work, * but only if no unclean reboots happened. */ ubi_err(ubi, "unsupported on-flash UBI format"); return -EINVAL; } /* Obviously the LEB with lower sequence counter is older */ second_is_newer = (sqnum2 > aeb->sqnum); /* * Now we know which copy is newer. 
If the copy flag of the PEB with * newer version is not set, then we just return, otherwise we have to * check data CRC. For the second PEB we already have the VID header, * for the first one - we'll need to re-read it from flash. * * Note: this may be optimized so that we wouldn't read twice. */ if (second_is_newer) { if (!vid_hdr->copy_flag) { /* It is not a copy, so it is newer */ dbg_bld("second PEB %d is newer, copy_flag is unset", pnum); return 1; } } else { if (!aeb->copy_flag) { /* It is not a copy, so it is newer */ dbg_bld("first PEB %d is newer, copy_flag is unset", pnum); return bitflips << 1; } vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); if (!vidb) return -ENOMEM; pnum = aeb->pnum; err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0); if (err) { if (err == UBI_IO_BITFLIPS) bitflips = 1; else { ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d", pnum, err); if (err > 0) err = -EIO; goto out_free_vidh; } } vid_hdr = ubi_get_vid_hdr(vidb); } /* Read the data of the copy and check the CRC */ len = be32_to_cpu(vid_hdr->data_size); mutex_lock(&ubi->buf_mutex); err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len); if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err)) goto out_unlock; data_crc = be32_to_cpu(vid_hdr->data_crc); crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len); if (crc != data_crc) { dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x", pnum, crc, data_crc); corrupted = 1; bitflips = 0; second_is_newer = !second_is_newer; } else { dbg_bld("PEB %d CRC is OK", pnum); bitflips |= !!err; } mutex_unlock(&ubi->buf_mutex); ubi_free_vid_buf(vidb); if (second_is_newer) dbg_bld("second PEB %d is newer, copy_flag is set", pnum); else dbg_bld("first PEB %d is newer, copy_flag is set", pnum); return second_is_newer | (bitflips << 1) | (corrupted << 2); out_unlock: mutex_unlock(&ubi->buf_mutex); out_free_vidh: ubi_free_vid_buf(vidb); return err; } /** * ubi_add_to_av - add used physical eraseblock to the attaching information. * @ubi: UBI device description object * @ai: attaching information * @pnum: the physical eraseblock number * @ec: erase counter * @vid_hdr: the volume identifier header * @bitflips: if bit-flips were detected when this physical eraseblock was read * * This function adds information about a used physical eraseblock to the * 'used' tree of the corresponding volume. The function is rather complex * because it has to handle cases when this is not the first physical * eraseblock belonging to the same logical eraseblock, and the newer one has * to be picked, while the older one has to be dropped. This function returns * zero in case of success and a negative error code in case of failure. */ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips) { int err, vol_id, lnum; unsigned long long sqnum; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb; struct rb_node **p, *parent = NULL; vol_id = be32_to_cpu(vid_hdr->vol_id); lnum = be32_to_cpu(vid_hdr->lnum); sqnum = be64_to_cpu(vid_hdr->sqnum); dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d", pnum, vol_id, lnum, ec, sqnum, bitflips); av = add_volume(ai, vol_id, pnum, vid_hdr); if (IS_ERR(av)) return PTR_ERR(av); if (ai->max_sqnum < sqnum) ai->max_sqnum = sqnum; /* * Walk the RB-tree of logical eraseblocks of volume @vol_id to look * if this is the first instance of this logical eraseblock or not. 
*/ p = &av->root.rb_node; while (*p) { int cmp_res; parent = *p; aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); if (lnum != aeb->lnum) { if (lnum < aeb->lnum) p = &(*p)->rb_left; else p = &(*p)->rb_right; continue; } /* * There is already a physical eraseblock describing the same * logical eraseblock present. */ dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d", aeb->pnum, aeb->sqnum, aeb->ec); /* * Make sure that the logical eraseblocks have different * sequence numbers. Otherwise the image is bad. * * However, if the sequence number is zero, we assume it must * be an ancient UBI image from the era when UBI did not have * sequence numbers. We still can attach these images, unless * there is a need to distinguish between old and new * eraseblocks, in which case we'll refuse the image in * 'ubi_compare_lebs()'. In other words, we attach old clean * images, but refuse attaching old images with duplicated * logical eraseblocks because there was an unclean reboot. */ if (aeb->sqnum == sqnum && sqnum != 0) { ubi_err(ubi, "two LEBs with same sequence number %llu", sqnum); ubi_dump_aeb(aeb, 0); ubi_dump_vid_hdr(vid_hdr); return -EINVAL; } /* * Now we have to drop the older one and preserve the newer * one. */ cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr); if (cmp_res < 0) return cmp_res; if (cmp_res & 1) { /* * This logical eraseblock is newer than the one * found earlier. */ err = validate_vid_hdr(ubi, vid_hdr, av, pnum); if (err) return err; err = add_to_list(ai, aeb->pnum, aeb->vol_id, aeb->lnum, aeb->ec, cmp_res & 4, &ai->erase); if (err) return err; aeb->ec = ec; aeb->pnum = pnum; aeb->vol_id = vol_id; aeb->lnum = lnum; aeb->scrub = ((cmp_res & 2) || bitflips); aeb->copy_flag = vid_hdr->copy_flag; aeb->sqnum = sqnum; if (av->highest_lnum == lnum) av->last_data_size = be32_to_cpu(vid_hdr->data_size); return 0; } else { /* * This logical eraseblock is older than the one found * previously. */ return add_to_list(ai, pnum, vol_id, lnum, ec, cmp_res & 4, &ai->erase); } } /* * We've met this logical eraseblock for the first time, add it to the * attaching information. */ err = validate_vid_hdr(ubi, vid_hdr, av, pnum); if (err) return err; aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM; aeb->vol_id = vol_id; aeb->lnum = lnum; aeb->scrub = bitflips; aeb->copy_flag = vid_hdr->copy_flag; aeb->sqnum = sqnum; if (av->highest_lnum <= lnum) { av->highest_lnum = lnum; av->last_data_size = be32_to_cpu(vid_hdr->data_size); } av->leb_count += 1; rb_link_node(&aeb->u.rb, parent, p); rb_insert_color(&aeb->u.rb, &av->root); return 0; } /** * ubi_add_av - add volume to the attaching information. * @ai: attaching information * @vol_id: the requested volume ID * * This function returns a pointer to the new volume description or an * ERR_PTR if the operation failed. */ struct ubi_ainf_volume *ubi_add_av(struct ubi_attach_info *ai, int vol_id) { bool created; return find_or_add_av(ai, vol_id, AV_ADD, &created); } /** * ubi_find_av - find volume in the attaching information. * @ai: attaching information * @vol_id: the requested volume ID * * This function returns a pointer to the volume description or %NULL if there * are no data about this volume in the attaching information. 
*/ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai, int vol_id) { bool created; return find_or_add_av((struct ubi_attach_info *)ai, vol_id, AV_FIND, &created); } static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av, struct list_head *list); /** * ubi_remove_av - delete attaching information about a volume. * @ai: attaching information * @av: the volume attaching information to delete */ void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av) { dbg_bld("remove attaching information about volume %d", av->vol_id); rb_erase(&av->rb, &ai->volumes); destroy_av(ai, av, &ai->erase); ai->vols_found -= 1; } /** * early_erase_peb - erase a physical eraseblock. * @ubi: UBI device description object * @ai: attaching information * @pnum: physical eraseblock number to erase; * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown) * * This function erases physical eraseblock 'pnum', and writes the erase * counter header to it. This function should only be used on UBI device * initialization stages, when the EBA sub-system had not been yet initialized. * This function returns zero in case of success and a negative error code in * case of failure. */ static int early_erase_peb(struct ubi_device *ubi, const struct ubi_attach_info *ai, int pnum, int ec) { int err; struct ubi_ec_hdr *ec_hdr; if ((long long)ec >= UBI_MAX_ERASECOUNTER) { /* * Erase counter overflow. Upgrade UBI and use 64-bit * erase counters internally. */ ubi_err(ubi, "erase counter overflow at PEB %d, EC %d", pnum, ec); return -EINVAL; } ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ec_hdr) return -ENOMEM; ec_hdr->ec = cpu_to_be64(ec); err = ubi_io_sync_erase(ubi, pnum, 0); if (err < 0) goto out_free; err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr); out_free: kfree(ec_hdr); return err; } /** * ubi_early_get_peb - get a free physical eraseblock. * @ubi: UBI device description object * @ai: attaching information * * This function returns a free physical eraseblock. It is supposed to be * called on the UBI initialization stages when the wear-leveling sub-system is * not initialized yet. This function picks a physical eraseblocks from one of * the lists, writes the EC header if it is needed, and removes it from the * list. * * This function returns a pointer to the "aeb" of the found free PEB in case * of success and an error code in case of failure. */ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi, struct ubi_attach_info *ai) { int err = 0; struct ubi_ainf_peb *aeb, *tmp_aeb; if (!list_empty(&ai->free)) { aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list); list_del(&aeb->u.list); dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec); return aeb; } /* * We try to erase the first physical eraseblock from the erase list * and pick it if we succeed, or try to erase the next one if not. And * so forth. We don't want to take care about bad eraseblocks here - * they'll be handled later. */ list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) { if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1); if (err) continue; aeb->ec += 1; list_del(&aeb->u.list); dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec); return aeb; } ubi_err(ubi, "no free eraseblocks"); return ERR_PTR(-ENOSPC); } /** * check_corruption - check the data area of PEB. 
* @ubi: UBI device description object * @vid_hdr: the (corrupted) VID header of this PEB * @pnum: the physical eraseblock number to check * * This is a helper function which is used to distinguish between VID header * corruptions caused by power cuts and other reasons. If the PEB contains only * 0xFF bytes in the data area, the VID header is most probably corrupted * because of a power cut (%0 is returned in this case). Otherwise, it was * probably corrupted for some other reasons (%1 is returned in this case). A * negative error code is returned if a read error occurred. * * If the corruption reason was a power cut, UBI can safely erase this PEB. * Otherwise, it should preserve it to avoid possibly destroying important * information. */ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, int pnum) { int err; mutex_lock(&ubi->buf_mutex); memset(ubi->peb_buf, 0x00, ubi->leb_size); err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start, ubi->leb_size); if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) { /* * Bit-flips or integrity errors while reading the data area. * It is difficult to say for sure what type of corruption is * this, but presumably a power cut happened while this PEB was * erased, so it became unstable and corrupted, and should be * erased. */ err = 0; goto out_unlock; } if (err) goto out_unlock; if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size)) goto out_unlock; ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF", pnum); ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection"); ubi_dump_vid_hdr(vid_hdr); pr_err("hexdump of PEB %d offset %d, length %d", pnum, ubi->leb_start, ubi->leb_size); ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, ubi->peb_buf, ubi->leb_size, 1); err = 1; out_unlock: mutex_unlock(&ubi->buf_mutex); return err; } static bool vol_ignored(int vol_id) { switch (vol_id) { case UBI_LAYOUT_VOLUME_ID: return true; } #ifdef CONFIG_MTD_UBI_FASTMAP return ubi_is_fm_vol(vol_id); #else return false; #endif } /** * scan_peb - scan and process UBI headers of a PEB. * @ubi: UBI device description object * @ai: attaching information * @pnum: the physical eraseblock number * @fast: true if we're scanning for a Fastmap * * This function reads UBI headers of PEB @pnum, checks them, and adds * information about this PEB to the corresponding list or RB-tree in the * "attaching info" structure. Returns zero if the physical eraseblock was * successfully handled and a negative error code in case of failure. 
*/ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, bool fast) { struct ubi_ec_hdr *ech = ai->ech; struct ubi_vid_io_buf *vidb = ai->vidb; struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb); long long ec; int err, bitflips = 0, vol_id = -1, ec_err = 0; dbg_bld("scan PEB %d", pnum); /* Skip bad physical eraseblocks */ err = ubi_io_is_bad(ubi, pnum); if (err < 0) return err; else if (err) { ai->bad_peb_count += 1; return 0; } err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (err < 0) return err; switch (err) { case 0: break; case UBI_IO_BITFLIPS: bitflips = 1; break; case UBI_IO_FF: ai->empty_peb_count += 1; return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, UBI_UNKNOWN, 0, &ai->erase); case UBI_IO_FF_BITFLIPS: ai->empty_peb_count += 1; return add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, UBI_UNKNOWN, 1, &ai->erase); case UBI_IO_BAD_HDR_EBADMSG: case UBI_IO_BAD_HDR: /* * We have to also look at the VID header, possibly it is not * corrupted. Set %bitflips flag in order to make this PEB be * moved and EC be re-created. */ ec_err = err; ec = UBI_UNKNOWN; bitflips = 1; break; default: ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d", err); return -EINVAL; } if (!ec_err) { int image_seq; /* Make sure UBI version is OK */ if (ech->version != UBI_VERSION) { ubi_err(ubi, "this UBI version is %d, image version is %d", UBI_VERSION, (int)ech->version); return -EINVAL; } ec = be64_to_cpu(ech->ec); if (ec > UBI_MAX_ERASECOUNTER) { /* * Erase counter overflow. The EC headers have 64 bits * reserved, but we anyway make use of only 31 bit * values, as this seems to be enough for any existing * flash. Upgrade UBI and use 64-bit erase counters * internally. */ ubi_err(ubi, "erase counter overflow, max is %d", UBI_MAX_ERASECOUNTER); ubi_dump_ec_hdr(ech); return -EINVAL; } /* * Make sure that all PEBs have the same image sequence number. * This allows us to detect situations when users flash UBI * images incorrectly, so that the flash has the new UBI image * and leftovers from the old one. This feature was added * relatively recently, and the sequence number was always * zero, because old UBI implementations always set it to zero. * For this reasons, we do not panic if some PEBs have zero * sequence number, while other PEBs have non-zero sequence * number. */ image_seq = be32_to_cpu(ech->image_seq); if (!ubi->image_seq) ubi->image_seq = image_seq; if (image_seq && ubi->image_seq != image_seq) { ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d", image_seq, pnum, ubi->image_seq); ubi_dump_ec_hdr(ech); return -EINVAL; } } /* OK, we've done with the EC header, let's look at the VID header */ err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 0); if (err < 0) return err; switch (err) { case 0: break; case UBI_IO_BITFLIPS: bitflips = 1; break; case UBI_IO_BAD_HDR_EBADMSG: if (ec_err == UBI_IO_BAD_HDR_EBADMSG) /* * Both EC and VID headers are corrupted and were read * with data integrity error, probably this is a bad * PEB, bit it is not marked as bad yet. This may also * be a result of power cut during erasure. */ ai->maybe_bad_peb_count += 1; fallthrough; case UBI_IO_BAD_HDR: /* * If we're facing a bad VID header we have to drop *all* * Fastmap data structures we find. The most recent Fastmap * could be bad and therefore there is a chance that we attach * from an old one. On a fine MTD stack a PEB must not render * bad all of a sudden, but the reality is different. 
* So, let's be paranoid and help finding the root cause by * falling back to scanning mode instead of attaching with a * bad EBA table and cause data corruption which is hard to * analyze. */ if (fast) ai->force_full_scan = 1; if (ec_err) /* * Both headers are corrupted. There is a possibility * that this a valid UBI PEB which has corresponding * LEB, but the headers are corrupted. However, it is * impossible to distinguish it from a PEB which just * contains garbage because of a power cut during erase * operation. So we just schedule this PEB for erasure. * * Besides, in case of NOR flash, we deliberately * corrupt both headers because NOR flash erasure is * slow and can start from the end. */ err = 0; else /* * The EC was OK, but the VID header is corrupted. We * have to check what is in the data area. */ err = check_corruption(ubi, vidh, pnum); if (err < 0) return err; else if (!err) /* This corruption is caused by a power cut */ err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->erase); else /* This is an unexpected corruption */ err = add_corrupted(ai, pnum, ec); if (err) return err; goto adjust_mean_ec; case UBI_IO_FF_BITFLIPS: err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->erase); if (err) return err; goto adjust_mean_ec; case UBI_IO_FF: if (ec_err || bitflips) err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->erase); else err = add_to_list(ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 0, &ai->free); if (err) return err; goto adjust_mean_ec; default: ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d", err); return -EINVAL; } vol_id = be32_to_cpu(vidh->vol_id); if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) { int lnum = be32_to_cpu(vidh->lnum); /* Unsupported internal volume */ switch (vidh->compat) { case UBI_COMPAT_DELETE: ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it", vol_id, lnum); err = add_to_list(ai, pnum, vol_id, lnum, ec, 1, &ai->erase); if (err) return err; return 0; case UBI_COMPAT_RO: ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode", vol_id, lnum); ubi->ro_mode = 1; break; case UBI_COMPAT_PRESERVE: ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found", vol_id, lnum); err = add_to_list(ai, pnum, vol_id, lnum, ec, 0, &ai->alien); if (err) return err; return 0; case UBI_COMPAT_REJECT: ubi_err(ubi, "incompatible internal volume %d:%d found", vol_id, lnum); return -EINVAL; } } if (ec_err) ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d", pnum); if (ubi_is_fm_vol(vol_id)) err = add_fastmap(ai, pnum, vidh, ec); else err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); if (err) return err; adjust_mean_ec: if (!ec_err) { ai->ec_sum += ec; ai->ec_count += 1; if (ec > ai->max_ec) ai->max_ec = ec; if (ec < ai->min_ec) ai->min_ec = ec; } return 0; } /** * late_analysis - analyze the overall situation with PEB. * @ubi: UBI device description object * @ai: attaching information * * This is a helper function which takes a look what PEBs we have after we * gather information about all of them ("ai" is compete). It decides whether * the flash is empty and should be formatted of whether there are too many * corrupted PEBs and we should not attach this MTD device. Returns zero if we * should proceed with attaching the MTD device, and %-EINVAL if we should not. 
*/ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai) { struct ubi_ainf_peb *aeb; int max_corr, peb_count; peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count; max_corr = peb_count / 20 ?: 8; /* * Few corrupted PEBs is not a problem and may be just a result of * unclean reboots. However, many of them may indicate some problems * with the flash HW or driver. */ if (ai->corr_peb_count) { ubi_err(ubi, "%d PEBs are corrupted and preserved", ai->corr_peb_count); pr_err("Corrupted PEBs are:"); list_for_each_entry(aeb, &ai->corr, u.list) pr_cont(" %d", aeb->pnum); pr_cont("\n"); /* * If too many PEBs are corrupted, we refuse attaching, * otherwise, only print a warning. */ if (ai->corr_peb_count >= max_corr) { ubi_err(ubi, "too many corrupted PEBs, refusing"); return -EINVAL; } } if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) { /* * All PEBs are empty, or almost all - a couple PEBs look like * they may be bad PEBs which were not marked as bad yet. * * This piece of code basically tries to distinguish between * the following situations: * * 1. Flash is empty, but there are few bad PEBs, which are not * marked as bad so far, and which were read with error. We * want to go ahead and format this flash. While formatting, * the faulty PEBs will probably be marked as bad. * * 2. Flash contains non-UBI data and we do not want to format * it and destroy possibly important information. */ if (ai->maybe_bad_peb_count <= 2) { ai->is_empty = 1; ubi_msg(ubi, "empty MTD device detected"); get_random_bytes(&ubi->image_seq, sizeof(ubi->image_seq)); } else { ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it"); return -EINVAL; } } return 0; } /** * destroy_av - free volume attaching information. * @av: volume attaching information * @ai: attaching information * @list: put the aeb elements in there if !NULL, otherwise free them * * This function destroys the volume attaching information. */ static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av, struct list_head *list) { struct ubi_ainf_peb *aeb; struct rb_node *this = av->root.rb_node; while (this) { if (this->rb_left) this = this->rb_left; else if (this->rb_right) this = this->rb_right; else { aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); this = rb_parent(this); if (this) { if (this->rb_left == &aeb->u.rb) this->rb_left = NULL; else this->rb_right = NULL; } if (list) list_add_tail(&aeb->u.list, list); else ubi_free_aeb(ai, aeb); } } kfree(av); } /** * destroy_ai - destroy attaching information. 
* @ai: attaching information */ static void destroy_ai(struct ubi_attach_info *ai) { struct ubi_ainf_peb *aeb, *aeb_tmp; struct ubi_ainf_volume *av; struct rb_node *rb; list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) { list_del(&aeb->u.list); ubi_free_aeb(ai, aeb); } /* Destroy the volume RB-tree */ rb = ai->volumes.rb_node; while (rb) { if (rb->rb_left) rb = rb->rb_left; else if (rb->rb_right) rb = rb->rb_right; else { av = rb_entry(rb, struct ubi_ainf_volume, rb); rb = rb_parent(rb); if (rb) { if (rb->rb_left == &av->rb) rb->rb_left = NULL; else rb->rb_right = NULL; } destroy_av(ai, av, NULL); } } kmem_cache_destroy(ai->aeb_slab_cache); kfree(ai); } /** * scan_all - scan entire MTD device. * @ubi: UBI device description object * @ai: attach info object * @start: start scanning at this PEB * * This function does full scanning of an MTD device and returns complete * information about it in form of a "struct ubi_attach_info" object. In case * of failure, an error code is returned. */ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai, int start) { int err, pnum; struct rb_node *rb1, *rb2; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb; err = -ENOMEM; ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ai->ech) return err; ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); if (!ai->vidb) goto out_ech; for (pnum = start; pnum < ubi->peb_count; pnum++) { cond_resched(); dbg_gen("process PEB %d", pnum); err = scan_peb(ubi, ai, pnum, false); if (err < 0) goto out_vidh; } ubi_msg(ubi, "scanning is finished"); /* Calculate mean erase counter */ if (ai->ec_count) ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count); err = late_analysis(ubi, ai); if (err) goto out_vidh; /* * In case of unknown erase counter we use the mean erase counter * value. */ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; } list_for_each_entry(aeb, &ai->free, u.list) { if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; } list_for_each_entry(aeb, &ai->corr, u.list) if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; list_for_each_entry(aeb, &ai->erase, u.list) if (aeb->ec == UBI_UNKNOWN) aeb->ec = ai->mean_ec; err = self_check_ai(ubi, ai); if (err) goto out_vidh; ubi_free_vid_buf(ai->vidb); kfree(ai->ech); return 0; out_vidh: ubi_free_vid_buf(ai->vidb); out_ech: kfree(ai->ech); return err; } static struct ubi_attach_info *alloc_ai(const char *slab_name) { struct ubi_attach_info *ai; ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL); if (!ai) return ai; INIT_LIST_HEAD(&ai->corr); INIT_LIST_HEAD(&ai->free); INIT_LIST_HEAD(&ai->erase); INIT_LIST_HEAD(&ai->alien); INIT_LIST_HEAD(&ai->fastmap); ai->volumes = RB_ROOT; ai->aeb_slab_cache = kmem_cache_create(slab_name, sizeof(struct ubi_ainf_peb), 0, 0, NULL); if (!ai->aeb_slab_cache) { kfree(ai); ai = NULL; } return ai; } #ifdef CONFIG_MTD_UBI_FASTMAP /** * scan_fast - try to find a fastmap and attach from it. 
* @ubi: UBI device description object * @ai: attach info object * * Returns 0 on success, negative return values indicate an internal * error. * UBI_NO_FASTMAP denotes that no fastmap was found. * UBI_BAD_FASTMAP denotes that the found fastmap was invalid. */ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai) { int err, pnum; struct ubi_attach_info *scan_ai; err = -ENOMEM; scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap"); if (!scan_ai) goto out; scan_ai->ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!scan_ai->ech) goto out_ai; scan_ai->vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); if (!scan_ai->vidb) goto out_ech; for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) { cond_resched(); dbg_gen("process PEB %d", pnum); err = scan_peb(ubi, scan_ai, pnum, true); if (err < 0) goto out_vidh; } ubi_free_vid_buf(scan_ai->vidb); kfree(scan_ai->ech); if (scan_ai->force_full_scan) err = UBI_NO_FASTMAP; else err = ubi_scan_fastmap(ubi, *ai, scan_ai); if (err) { /* * Didn't attach via fastmap, do a full scan but reuse what * we've aready scanned. */ destroy_ai(*ai); *ai = scan_ai; } else destroy_ai(scan_ai); return err; out_vidh: ubi_free_vid_buf(scan_ai->vidb); out_ech: kfree(scan_ai->ech); out_ai: destroy_ai(scan_ai); out: return err; } #endif /** * ubi_attach - attach an MTD device. * @ubi: UBI device descriptor * @force_scan: if set to non-zero attach by scanning * * This function returns zero in case of success and a negative error code in * case of failure. */ int ubi_attach(struct ubi_device *ubi, int force_scan) { int err; struct ubi_attach_info *ai; ai = alloc_ai("ubi_aeb_slab_cache"); if (!ai) return -ENOMEM; #ifdef CONFIG_MTD_UBI_FASTMAP /* On small flash devices we disable fastmap in any case. */ if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) { ubi->fm_disabled = 1; force_scan = 1; } if (force_scan) err = scan_all(ubi, ai, 0); else { err = scan_fast(ubi, &ai); if (err > 0 || mtd_is_eccerr(err)) { if (err != UBI_NO_FASTMAP) { destroy_ai(ai); ai = alloc_ai("ubi_aeb_slab_cache"); if (!ai) return -ENOMEM; err = scan_all(ubi, ai, 0); } else { err = scan_all(ubi, ai, UBI_FM_MAX_START); } } } #else err = scan_all(ubi, ai, 0); #endif if (err) goto out_ai; ubi->bad_peb_count = ai->bad_peb_count; ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; ubi->corr_peb_count = ai->corr_peb_count; ubi->max_ec = ai->max_ec; ubi->mean_ec = ai->mean_ec; dbg_gen("max. sequence number: %llu", ai->max_sqnum); err = ubi_read_volume_table(ubi, ai); if (err) goto out_ai; err = ubi_wl_init(ubi, ai); if (err) goto out_vtbl; err = ubi_eba_init(ubi, ai); if (err) goto out_wl; #ifdef CONFIG_MTD_UBI_FASTMAP if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) { struct ubi_attach_info *scan_ai; scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap"); if (!scan_ai) { err = -ENOMEM; goto out_wl; } err = scan_all(ubi, scan_ai, 0); if (err) { destroy_ai(scan_ai); goto out_wl; } err = self_check_eba(ubi, ai, scan_ai); destroy_ai(scan_ai); if (err) goto out_wl; } #endif destroy_ai(ai); return 0; out_wl: ubi_wl_close(ubi); out_vtbl: ubi_free_all_volumes(ubi); vfree(ubi->vtbl); out_ai: destroy_ai(ai); return err; } /** * self_check_ai - check the attaching information. * @ubi: UBI device description object * @ai: attaching information * * This function returns zero if the attaching information is all right, and a * negative error code if not or if an error occurred. 
*/ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai) { struct ubi_vid_io_buf *vidb = ai->vidb; struct ubi_vid_hdr *vidh = ubi_get_vid_hdr(vidb); int pnum, err, vols_found = 0; struct rb_node *rb1, *rb2; struct ubi_ainf_volume *av; struct ubi_ainf_peb *aeb, *last_aeb; uint8_t *buf; if (!ubi_dbg_chk_gen(ubi)) return 0; /* * At first, check that attaching information is OK. */ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { int leb_count = 0; cond_resched(); vols_found += 1; if (ai->is_empty) { ubi_err(ubi, "bad is_empty flag"); goto bad_av; } if (av->vol_id < 0 || av->highest_lnum < 0 || av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 || av->data_pad < 0 || av->last_data_size < 0) { ubi_err(ubi, "negative values"); goto bad_av; } if (av->vol_id >= UBI_MAX_VOLUMES && av->vol_id < UBI_INTERNAL_VOL_START) { ubi_err(ubi, "bad vol_id"); goto bad_av; } if (av->vol_id > ai->highest_vol_id) { ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there", ai->highest_vol_id, av->vol_id); goto out; } if (av->vol_type != UBI_DYNAMIC_VOLUME && av->vol_type != UBI_STATIC_VOLUME) { ubi_err(ubi, "bad vol_type"); goto bad_av; } if (av->data_pad > ubi->leb_size / 2) { ubi_err(ubi, "bad data_pad"); goto bad_av; } last_aeb = NULL; ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { cond_resched(); last_aeb = aeb; leb_count += 1; if (aeb->pnum < 0 || aeb->ec < 0) { ubi_err(ubi, "negative values"); goto bad_aeb; } if (aeb->ec < ai->min_ec) { ubi_err(ubi, "bad ai->min_ec (%d), %d found", ai->min_ec, aeb->ec); goto bad_aeb; } if (aeb->ec > ai->max_ec) { ubi_err(ubi, "bad ai->max_ec (%d), %d found", ai->max_ec, aeb->ec); goto bad_aeb; } if (aeb->pnum >= ubi->peb_count) { ubi_err(ubi, "too high PEB number %d, total PEBs %d", aeb->pnum, ubi->peb_count); goto bad_aeb; } if (av->vol_type == UBI_STATIC_VOLUME) { if (aeb->lnum >= av->used_ebs) { ubi_err(ubi, "bad lnum or used_ebs"); goto bad_aeb; } } else { if (av->used_ebs != 0) { ubi_err(ubi, "non-zero used_ebs"); goto bad_aeb; } } if (aeb->lnum > av->highest_lnum) { ubi_err(ubi, "incorrect highest_lnum or lnum"); goto bad_aeb; } } if (av->leb_count != leb_count) { ubi_err(ubi, "bad leb_count, %d objects in the tree", leb_count); goto bad_av; } if (!last_aeb) continue; aeb = last_aeb; if (aeb->lnum != av->highest_lnum) { ubi_err(ubi, "bad highest_lnum"); goto bad_aeb; } } if (vols_found != ai->vols_found) { ubi_err(ubi, "bad ai->vols_found %d, should be %d", ai->vols_found, vols_found); goto out; } /* Check that attaching information is correct */ ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) { last_aeb = NULL; ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) { int vol_type; cond_resched(); last_aeb = aeb; err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidb, 1); if (err && err != UBI_IO_BITFLIPS) { ubi_err(ubi, "VID header is not OK (%d)", err); if (err > 0) err = -EIO; return err; } vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? 
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; if (av->vol_type != vol_type) { ubi_err(ubi, "bad vol_type"); goto bad_vid_hdr; } if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) { ubi_err(ubi, "bad sqnum %llu", aeb->sqnum); goto bad_vid_hdr; } if (av->vol_id != be32_to_cpu(vidh->vol_id)) { ubi_err(ubi, "bad vol_id %d", av->vol_id); goto bad_vid_hdr; } if (av->compat != vidh->compat) { ubi_err(ubi, "bad compat %d", vidh->compat); goto bad_vid_hdr; } if (aeb->lnum != be32_to_cpu(vidh->lnum)) { ubi_err(ubi, "bad lnum %d", aeb->lnum); goto bad_vid_hdr; } if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) { ubi_err(ubi, "bad used_ebs %d", av->used_ebs); goto bad_vid_hdr; } if (av->data_pad != be32_to_cpu(vidh->data_pad)) { ubi_err(ubi, "bad data_pad %d", av->data_pad); goto bad_vid_hdr; } } if (!last_aeb) continue; if (av->highest_lnum != be32_to_cpu(vidh->lnum)) { ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum); goto bad_vid_hdr; } if (av->last_data_size != be32_to_cpu(vidh->data_size)) { ubi_err(ubi, "bad last_data_size %d", av->last_data_size); goto bad_vid_hdr; } } /* * Make sure that all the physical eraseblocks are in one of the lists * or trees. */ buf = kzalloc(ubi->peb_count, GFP_KERNEL); if (!buf) return -ENOMEM; for (pnum = 0; pnum < ubi->peb_count; pnum++) { err = ubi_io_is_bad(ubi, pnum); if (err < 0) { kfree(buf); return err; } else if (err) buf[pnum] = 1; } ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->free, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->corr, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->erase, u.list) buf[aeb->pnum] = 1; list_for_each_entry(aeb, &ai->alien, u.list) buf[aeb->pnum] = 1; err = 0; for (pnum = 0; pnum < ubi->peb_count; pnum++) if (!buf[pnum]) { ubi_err(ubi, "PEB %d is not referred", pnum); err = 1; } kfree(buf); if (err) goto out; return 0; bad_aeb: ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum); ubi_dump_aeb(aeb, 0); ubi_dump_av(av); goto out; bad_av: ubi_err(ubi, "bad attaching information about volume %d", av->vol_id); ubi_dump_av(av); goto out; bad_vid_hdr: ubi_err(ubi, "bad attaching information about volume %d", av->vol_id); ubi_dump_av(av); ubi_dump_vid_hdr(vidh); out: dump_stack(); return -EINVAL; } |
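/*
 * Hedged illustration, not part of attach.c: decoding the bit-packed
 * result of ubi_compare_lebs() described in its kernel-doc above (bit 0:
 * the second PEB is newer, bit 1: bit-flips were detected in the newer
 * LEB, bit 2: the older LEB is corrupted). This mirrors the cmp_res & 1,
 * cmp_res & 2 and cmp_res & 4 tests in ubi_add_to_av(); the helper and
 * struct names here are hypothetical.
 */
struct leb_cmp_result {
	bool second_is_newer;
	bool newer_has_bitflips;
	bool older_is_corrupted;
};

static inline struct leb_cmp_result decode_leb_cmp(int cmp_res)
{
	struct leb_cmp_result res = {
		.second_is_newer	= cmp_res & 1,
		.newer_has_bitflips	= cmp_res & 2,
		.older_is_corrupted	= cmp_res & 4,
	};

	return res;
}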
| 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 | // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module implementing an IP set type: the hash:ip,port,net type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <net/tcp.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 /* 1 SCTP and UDPLITE support added */ /* 2 Range as input support for IPv4 added */ /* 3 nomatch flag support added */ /* 4 Counters support added */ /* 5 Comments support added */ /* 6 Forceadd support added */ /* 7 skbinfo support added */ #define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:ip,port,net"); /* Type specific function prefix */ #define HTYPE hash_ipportnet /* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0 * However this way we have to store internally cidr - 1, 
* dancing back and forth. */ #define IP_SET_HASH_WITH_NETS_PACKED #define IP_SET_HASH_WITH_PROTO #define IP_SET_HASH_WITH_NETS /* IPv4 variant */ /* Member elements */ struct hash_ipportnet4_elem { __be32 ip; __be32 ip2; __be16 port; u8 cidr:7; u8 nomatch:1; u8 proto; }; /* Common functions */ static bool hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1, const struct hash_ipportnet4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->ip2 == ip2->ip2 && ip1->cidr == ip2->cidr && ip1->port == ip2->port && ip1->proto == ip2->proto; } static int hash_ipportnet4_do_data_match(const struct hash_ipportnet4_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_ipportnet4_data_set_flags(struct hash_ipportnet4_elem *elem, u32 flags) { elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH); } static void hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr) { elem->ip2 &= ip_set_netmask(cidr); elem->cidr = cidr - 1; } static bool hash_ipportnet4_data_list(struct sk_buff *skb, const struct hash_ipportnet4_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next, const struct hash_ipportnet4_elem *d) { next->ip = d->ip; next->port = d->port; next->ip2 = d->ip2; } #define MTYPE hash_ipportnet4 #define HOST_MASK 32 #include "ip_set_hash_gen.h" static int hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_ipportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK - 1; if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2); e.ip2 &= ip_set_netmask(e.cidr + 1); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct hash_ipportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; u32 ip2_from = 0, ip2_to = 0, ip2, i = 0; bool with_ports = false; u8 cidr; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) 
return ret; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from); if (ret) return ret; if (tb[IPSET_ATTR_CIDR2]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; e.cidr = cidr - 1; } e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else { return -IPSET_ERR_MISSING_PROTO; } if (!(with_ports || e.proto == IPPROTO_ICMP)) e.port = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); } with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; if (adt == IPSET_TEST || !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) { e.ip = htonl(ip); e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1)); ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 0 : ret; } ip_to = ip; if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip > ip_to) swap(ip, ip_to); } else if (tb[IPSET_ATTR_CIDR]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); } port_to = port = ntohs(e.port); if (tb[IPSET_ATTR_PORT_TO]) { port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); } ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); if (ret) return ret; if (ip2_from > ip2_to) swap(ip2_from, ip2_to); if (ip2_from + UINT_MAX == ip2_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1); } if (retried) { ip = ntohl(h->next.ip); p = ntohs(h->next.port); ip2 = ntohl(h->next.ip2); } else { p = port; ip2 = ip2_from; } for (; ip <= ip_to; ip++) { e.ip = htonl(ip); for (; p <= port_to; p++) { e.port = htons(p); do { i++; e.ip2 = htonl(ip2); ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr); e.cidr = cidr - 1; if (i > IPSET_MAX_RANGE) { hash_ipportnet4_data_next(&h->next, &e); return -ERANGE; } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } while (ip2++ < ip2_to); ip2 = ip2_from; } p = port; } return ret; } /* IPv6 variant */ struct hash_ipportnet6_elem { union nf_inet_addr ip; union nf_inet_addr ip2; __be16 port; u8 cidr:7; u8 nomatch:1; u8 proto; }; /* Common functions */ static bool hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1, const struct hash_ipportnet6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) && ip1->cidr == ip2->cidr && ip1->port == ip2->port && ip1->proto == ip2->proto; } static int hash_ipportnet6_do_data_match(const struct hash_ipportnet6_elem *elem) { return elem->nomatch ? 
-ENOTEMPTY : 1; } static void hash_ipportnet6_data_set_flags(struct hash_ipportnet6_elem *elem, u32 flags) { elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH); } static void hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip2, cidr); elem->cidr = cidr - 1; } static bool hash_ipportnet6_data_list(struct sk_buff *skb, const struct hash_ipportnet6_elem *data) { u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_ipportnet6_data_next(struct hash_ipportnet6_elem *next, const struct hash_ipportnet6_elem *d) { next->port = d->port; } #undef MTYPE #undef HOST_MASK #define MTYPE hash_ipportnet6 #define HOST_MASK 128 #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_ipportnet6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet6_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK - 1; if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6); ip6_netmask(&e.ip2, e.cidr + 1); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct hash_ipportnet6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; u8 cidr; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; if (unlikely(tb[IPSET_ATTR_CIDR])) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (cidr != HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2); if (ret) return ret; if (tb[IPSET_ATTR_CIDR2]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; e.cidr = cidr - 1; } ip6_netmask(&e.ip2, e.cidr + 1); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else 
{ return -IPSET_ERR_MISSING_PROTO; } if (!(with_ports || e.proto == IPPROTO_ICMPV6)) e.port = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 0 : ret; } port = ntohs(e.port); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); if (retried) port = ntohs(h->next.port); for (; port <= port_to; port++) { e.port = htons(port); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } return ret; } static struct ip_set_type hash_ipportnet_type __read_mostly = { .name = "hash:ip,port,net", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 | IPSET_TYPE_NOMATCH, .dimension = IPSET_DIM_THREE, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE, .create = hash_ipportnet_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_INITVAL] = { .type = NLA_U32 }, [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IP2] = { .type = NLA_NESTED }, [IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_PORT] = { .type = NLA_U16 }, [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_ipportnet_init(void) { return ip_set_type_register(&hash_ipportnet_type); } static void __exit hash_ipportnet_fini(void) { rcu_barrier(); ip_set_type_unregister(&hash_ipportnet_type); } module_init(hash_ipportnet_init); module_exit(hash_ipportnet_fini); |
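A small standalone sketch of the packed cidr encoding this type relies on (IP_SET_HASH_WITH_NETS_PACKED): the element stores cidr - 1 in a 7-bit field so the nomatch flag fits in the remaining bit, and the netlink dump adds 1 back. Userspace C with hypothetical names, not ipset code:

#include <assert.h>
#include <stdint.h>

struct packed_net {
	uint8_t cidr:7;            /* stores cidr - 1, so cidr == 0 cannot be encoded */
	uint8_t nomatch:1;
};

static void pack(struct packed_net *e, uint8_t cidr, int nomatch)
{
	e->cidr = cidr - 1;        /* encode, as hash_ipportnet4_data_netmask() does */
	e->nomatch = !!nomatch;
}

static uint8_t unpack_cidr(const struct packed_net *e)
{
	return e->cidr + 1;        /* decode, as hash_ipportnet4_data_list() does */
}

int main(void)
{
	struct packed_net e;

	pack(&e, 24, 1);
	assert(unpack_cidr(&e) == 24 && e.nomatch);
	return 0;
}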
| 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_RT_H #define _LINUX_SCHED_RT_H #include <linux/sched.h> struct task_struct; static inline bool rt_prio(int prio) { return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO); } static inline bool rt_or_dl_prio(int prio) { return unlikely(prio < MAX_RT_PRIO); } /* * Returns true if a task has a priority that belongs to RT class. PI-boosted * tasks will return true. Use rt_policy() to ignore PI-boosted tasks. */ static inline bool rt_task(struct task_struct *p) { return rt_prio(p->prio); } /* * Returns true if a task has a priority that belongs to RT or DL classes. * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore * PI-boosted tasks. */ static inline bool rt_or_dl_task(struct task_struct *p) { return rt_or_dl_prio(p->prio); } /* * Returns true if a task has a policy that belongs to RT or DL classes. * PI-boosted tasks will return false. */ static inline bool rt_or_dl_task_policy(struct task_struct *tsk) { int policy = tsk->policy; if (policy == SCHED_FIFO || policy == SCHED_RR) return true; if (policy == SCHED_DEADLINE) return true; return false; } #ifdef CONFIG_RT_MUTEXES extern void rt_mutex_pre_schedule(void); extern void rt_mutex_schedule(void); extern void rt_mutex_post_schedule(void); /* * Must hold either p->pi_lock or task_rq(p)->lock. */ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) { return p->pi_top_task; } extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); extern void rt_mutex_adjust_pi(struct task_struct *p); #else static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) { return NULL; } # define rt_mutex_adjust_pi(p) do { } while (0) #endif extern void normalize_rt_tasks(void); /* * default timeslice is 100 msecs (used only for SCHED_RR tasks). * Timeslices get refilled after they expire. */ #define RR_TIMESLICE (100 * HZ / 1000) #endif /* _LINUX_SCHED_RT_H */ |
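A short sketch of how the predicates above partition the numeric priority space, assuming the usual values MAX_DL_PRIO == 0 and MAX_RT_PRIO == 100 (stated here as an assumption; the authoritative definitions live in include/linux/sched/prio.h):

/* Hedged sketch, not kernel code. */
static inline const char *prio_class(int prio)
{
	if (prio < 0)
		return "deadline";     /* rt_or_dl_prio() true, rt_prio() false */
	if (prio < 100)
		return "realtime";     /* both rt_prio() and rt_or_dl_prio() true */
	return "normal";               /* neither predicate is true */
}

/*
 * And the SCHED_RR refill period: with an example HZ of 250,
 * RR_TIMESLICE == 100 * 250 / 1000 == 25 ticks, i.e. 100 ms of wall time.
 */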
| 19 47 30 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 | /* SPDX-License-Identifier: GPL-2.0 */ /* * This file provides wrappers with sanitizer instrumentation for bit * locking operations. * * To use this functionality, an arch's bitops.h file needs to define each of * the below bit operations with an arch_ prefix (e.g. arch_set_bit(), * arch___set_bit(), etc.). */ #ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H #define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H #include <linux/instrumented.h> /** * clear_bit_unlock - Clear a bit in memory, for unlock * @nr: the bit to set * @addr: the address to start counting from * * This operation is atomic and provides release barrier semantics. */ static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) { kcsan_release(); instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long)); arch_clear_bit_unlock(nr, addr); } /** * __clear_bit_unlock - Clears a bit in memory * @nr: Bit to clear * @addr: Address to start counting from * * This is a non-atomic operation but implies a release barrier before the * memory operation. It can be used for an unlock if no other CPUs can * concurrently modify other bits in the word. */ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) { kcsan_release(); instrument_write(addr + BIT_WORD(nr), sizeof(long)); arch___clear_bit_unlock(nr, addr); } /** * test_and_set_bit_lock - Set a bit and return its old value, for lock * @nr: Bit to set * @addr: Address to count from * * This operation is atomic and provides acquire barrier semantics if * the returned value is 0. * It can be used to implement bit locks. */ static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr) { instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long)); return arch_test_and_set_bit_lock(nr, addr); } /** * xor_unlock_is_negative_byte - XOR a single byte in memory and test if * it is negative, for unlock. * @mask: Change the bits which are set in this mask. * @addr: The address of the word containing the byte to change. * * Changes some of bits 0-6 in the word pointed to by @addr. * This operation is atomic and provides release barrier semantics. * Used to optimise some folio operations which are commonly paired * with an unlock or end of writeback. Bit 7 is used as PG_waiters to * indicate whether anybody is waiting for the unlock. * * Return: Whether the top bit of the byte is set. */ static inline bool xor_unlock_is_negative_byte(unsigned long mask, volatile unsigned long *addr) { kcsan_release(); instrument_atomic_write(addr, sizeof(long)); return arch_xor_unlock_is_negative_byte(mask, addr); } #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */ |
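A hedged usage sketch of the lock/unlock pair documented above, in the style of a hand-rolled bit lock. The names demo_flags and DEMO_LOCK_BIT are hypothetical, and real code would usually reach for the bit_spinlock.h helpers instead:

#include <linux/kernel.h>
#include <linux/bitops.h>

static unsigned long demo_flags;
#define DEMO_LOCK_BIT 0

static void demo_lock(void)
{
	/* acquire semantics apply when the returned old value is 0 */
	while (test_and_set_bit_lock(DEMO_LOCK_BIT, &demo_flags))
		cpu_relax();
}

static void demo_unlock(void)
{
	/* release semantics; pairs with the acquire in demo_lock() */
	clear_bit_unlock(DEMO_LOCK_BIT, &demo_flags);
}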
| 10 16 16 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_SCHED_RCUPDATE_WAIT_H #define _LINUX_SCHED_RCUPDATE_WAIT_H /* * RCU synchronization types and methods: */ #include <linux/rcupdate.h> #include <linux/completion.h> #include <linux/sched.h> /* * Structure allowing asynchronous waiting on RCU. */ struct rcu_synchronize { struct rcu_head head; struct completion completion; /* This is for debugging. */ struct rcu_gp_oldstate oldstate; }; void wakeme_after_rcu(struct rcu_head *head); void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); #define _wait_rcu_gp(checktiny, state, ...) \ do { \ call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ __wait_rcu_gp(checktiny, state, ARRAY_SIZE(__crcu_array), __crcu_array, __rs_array); \ } while (0) #define wait_rcu_gp(...) _wait_rcu_gp(false, TASK_UNINTERRUPTIBLE, __VA_ARGS__) #define wait_rcu_gp_state(state, ...) _wait_rcu_gp(false, state, __VA_ARGS__) /** * synchronize_rcu_mult - Wait concurrently for multiple grace periods * @...: List of call_rcu() functions for different grace periods to wait on * * This macro waits concurrently for multiple types of RCU grace periods. * For example, synchronize_rcu_mult(call_rcu, call_rcu_tasks) would wait * on concurrent RCU and RCU-tasks grace periods. Waiting on a given SRCU * domain requires you to write a wrapper function for that SRCU domain's * call_srcu() function, with this wrapper supplying the pointer to the * corresponding srcu_struct. * * Note that call_rcu_hurry() should be used instead of call_rcu() * because in kernels built with CONFIG_RCU_LAZY=y the delay between the * invocation of call_rcu() and that of the corresponding RCU callback * can be multiple seconds. * * The first argument tells Tiny RCU's _wait_rcu_gp() not to * bother waiting for RCU. The reason for this is because anywhere * synchronize_rcu_mult() can be called is automatically already a full * grace period. */ #define synchronize_rcu_mult(...) \ _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), TASK_UNINTERRUPTIBLE, __VA_ARGS__) static inline void cond_resched_rcu(void) { #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) rcu_read_unlock(); cond_resched(); rcu_read_lock(); #endif } // Has the current task blocked within its current RCU read-side // critical section? static inline bool has_rcu_reader_blocked(void) { #ifdef CONFIG_PREEMPT_RCU return !list_empty(¤t->rcu_node_entry); #else return false; #endif } #endif /* _LINUX_SCHED_RCUPDATE_WAIT_H */ |
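The synchronize_rcu_mult() comment asks callers to wrap call_srcu() so a specific srcu_struct can participate. A minimal sketch of that wrapper, with a hypothetical demo_srcu domain:

#include <linux/srcu.h>
#include <linux/rcupdate_wait.h>

DEFINE_STATIC_SRCU(demo_srcu);

/* Matches call_rcu_func_t, supplying the srcu_struct pointer itself. */
static void call_demo_srcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(&demo_srcu, head, func);
}

static void demo_wait(void)
{
	/*
	 * Wait concurrently for a normal RCU grace period (call_rcu_hurry(),
	 * as recommended above) and a grace period of the demo SRCU domain.
	 */
	synchronize_rcu_mult(call_rcu_hurry, call_demo_srcu);
}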
| 3 1 1 2 1 2 3 12 11 12 5 2 5 9 9 1 8 4 2 3 2 1 1 1 8 4 8 1 1 1 12 13 13 1 1 1 1 1 1 1 3 1 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017 Red Hat, Inc. */ #include "fuse_i.h" #include <linux/uio.h> #include <linux/compat.h> #include <linux/fileattr.h> #include <linux/fsverity.h> #define FUSE_VERITY_ENABLE_ARG_MAX_PAGES 256 static ssize_t fuse_send_ioctl(struct fuse_mount *fm, struct fuse_args *args, struct fuse_ioctl_out *outarg) { ssize_t ret; args->out_args[0].size = sizeof(*outarg); args->out_args[0].value = outarg; ret = fuse_simple_request(fm, args); /* Translate ENOSYS, which shouldn't be returned from fs */ if (ret == -ENOSYS) ret = -ENOTTY; if (ret >= 0 && outarg->result == -ENOSYS) outarg->result = -ENOTTY; return ret; } /* * CUSE servers compiled on 32bit broke on 64bit kernels because the * ABI was defined to be 'struct iovec' which is different on 32bit * and 64bit. Fortunately we can determine which structure the server * used from the size of the reply. */ static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src, size_t transferred, unsigned count, bool is_compat) { #ifdef CONFIG_COMPAT if (count * sizeof(struct compat_iovec) == transferred) { struct compat_iovec *ciov = src; unsigned i; /* * With this interface a 32bit server cannot support * non-compat (i.e. 
ones coming from 64bit apps) ioctl * requests */ if (!is_compat) return -EINVAL; for (i = 0; i < count; i++) { dst[i].iov_base = compat_ptr(ciov[i].iov_base); dst[i].iov_len = ciov[i].iov_len; } return 0; } #endif if (count * sizeof(struct iovec) != transferred) return -EIO; memcpy(dst, src, transferred); return 0; } /* Make sure iov_length() won't overflow */ static int fuse_verify_ioctl_iov(struct fuse_conn *fc, struct iovec *iov, size_t count) { size_t n; u32 max = fc->max_pages << PAGE_SHIFT; for (n = 0; n < count; n++, iov++) { if (iov->iov_len > (size_t) max) return -ENOMEM; max -= iov->iov_len; } return 0; } static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst, void *src, size_t transferred, unsigned count, bool is_compat) { unsigned i; struct fuse_ioctl_iovec *fiov = src; if (fc->minor < 16) { return fuse_copy_ioctl_iovec_old(dst, src, transferred, count, is_compat); } if (count * sizeof(struct fuse_ioctl_iovec) != transferred) return -EIO; for (i = 0; i < count; i++) { /* Did the server supply an inappropriate value? */ if (fiov[i].base != (unsigned long) fiov[i].base || fiov[i].len != (unsigned long) fiov[i].len) return -EIO; dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base; dst[i].iov_len = (size_t) fiov[i].len; #ifdef CONFIG_COMPAT if (is_compat && (ptr_to_compat(dst[i].iov_base) != fiov[i].base || (compat_size_t) dst[i].iov_len != fiov[i].len)) return -EIO; #endif } return 0; } /* For fs-verity, determine iov lengths from input */ static int fuse_setup_measure_verity(unsigned long arg, struct iovec *iov) { __u16 digest_size; struct fsverity_digest __user *uarg = (void __user *)arg; if (copy_from_user(&digest_size, &uarg->digest_size, sizeof(digest_size))) return -EFAULT; if (digest_size > SIZE_MAX - sizeof(struct fsverity_digest)) return -EINVAL; iov->iov_len = sizeof(struct fsverity_digest) + digest_size; return 0; } static int fuse_setup_enable_verity(unsigned long arg, struct iovec *iov, unsigned int *in_iovs) { struct fsverity_enable_arg enable; struct fsverity_enable_arg __user *uarg = (void __user *)arg; const __u32 max_buffer_len = FUSE_VERITY_ENABLE_ARG_MAX_PAGES * PAGE_SIZE; if (copy_from_user(&enable, uarg, sizeof(enable))) return -EFAULT; if (enable.salt_size > max_buffer_len || enable.sig_size > max_buffer_len) return -ENOMEM; if (enable.salt_size > 0) { iov++; (*in_iovs)++; iov->iov_base = u64_to_user_ptr(enable.salt_ptr); iov->iov_len = enable.salt_size; } if (enable.sig_size > 0) { iov++; (*in_iovs)++; iov->iov_base = u64_to_user_ptr(enable.sig_ptr); iov->iov_len = enable.sig_size; } return 0; } /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed * to dereference the passed pointer, so the parameter requires deep * copying but FUSE has no idea whatsoever about what to copy in or * out. * * This is solved by allowing FUSE server to retry ioctl with * necessary in/out iovecs. Let's assume the ioctl implementation * needs to read in the following structure. * * struct a { * char *buf; * size_t buflen; * } * * On the first callout to FUSE server, inarg->in_size and * inarg->out_size will be NULL; then, the server completes the ioctl * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and * the actual iov array to * * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } } * * which tells FUSE to copy in the requested area and retry the ioctl. 
* On the second round, the server has access to the structure and * from that it can tell what to look for next, so on the invocation, * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to * * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) }, * { .iov_base = a.buf, .iov_len = a.buflen } } * * FUSE will copy both struct a and the pointed buffer from the * process doing the ioctl and retry ioctl with both struct a and the * buffer. * * This time, FUSE server has everything it needs and completes ioctl * without FUSE_IOCTL_RETRY which finishes the ioctl call. * * Copying data out works the same way. * * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel * automatically initializes in and out iovs by decoding @cmd with * _IOC_* macros and the server is not allowed to request RETRY. This * limits ioctl data transfers to well-formed ioctls and is the forced * behavior for all FUSE servers. */ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, unsigned int flags) { struct fuse_file *ff = file->private_data; struct fuse_mount *fm = ff->fm; struct fuse_ioctl_in inarg = { .fh = ff->fh, .cmd = cmd, .arg = arg, .flags = flags }; struct fuse_ioctl_out outarg; struct iovec *iov_page = NULL; struct iovec *in_iov = NULL, *out_iov = NULL; unsigned int in_iovs = 0, out_iovs = 0, max_pages; size_t in_size, out_size, c; ssize_t transferred; int err, i; struct iov_iter ii; struct fuse_args_pages ap = {}; #if BITS_PER_LONG == 32 inarg.flags |= FUSE_IOCTL_32BIT; #else if (flags & FUSE_IOCTL_COMPAT) { inarg.flags |= FUSE_IOCTL_32BIT; #ifdef CONFIG_X86_X32_ABI if (in_x32_syscall()) inarg.flags |= FUSE_IOCTL_COMPAT_X32; #endif } #endif /* assume all the iovs returned by client always fits in a page */ BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); err = -ENOMEM; ap.folios = fuse_folios_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs); iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); if (!ap.folios || !iov_page) goto out; fuse_folio_descs_length_init(ap.descs, 0, fm->fc->max_pages); /* * If restricted, initialize IO parameters as encoded in @cmd. * RETRY from server is not allowed. */ if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { struct iovec *iov = iov_page; iov->iov_base = (void __user *)arg; iov->iov_len = _IOC_SIZE(cmd); if (_IOC_DIR(cmd) & _IOC_WRITE) { in_iov = iov; in_iovs = 1; } if (_IOC_DIR(cmd) & _IOC_READ) { out_iov = iov; out_iovs = 1; } err = 0; switch (cmd) { case FS_IOC_MEASURE_VERITY: err = fuse_setup_measure_verity(arg, iov); break; case FS_IOC_ENABLE_VERITY: err = fuse_setup_enable_verity(arg, iov, &in_iovs); break; } if (err) goto out; } retry: inarg.in_size = in_size = iov_length(in_iov, in_iovs); inarg.out_size = out_size = iov_length(out_iov, out_iovs); /* * Out data can be used either for actual out data or iovs, * make sure there always is at least one page. 
*/ out_size = max_t(size_t, out_size, PAGE_SIZE); max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); /* make sure there are enough buffer pages and init request with them */ err = -ENOMEM; if (max_pages > fm->fc->max_pages) goto out; while (ap.num_folios < max_pages) { ap.folios[ap.num_folios] = folio_alloc(GFP_KERNEL | __GFP_HIGHMEM, 0); if (!ap.folios[ap.num_folios]) goto out; ap.num_folios++; } /* okay, let's send it to the client */ ap.args.opcode = FUSE_IOCTL; ap.args.nodeid = ff->nodeid; ap.args.in_numargs = 1; ap.args.in_args[0].size = sizeof(inarg); ap.args.in_args[0].value = &inarg; if (in_size) { ap.args.in_numargs++; ap.args.in_args[1].size = in_size; ap.args.in_pages = true; err = -EFAULT; iov_iter_init(&ii, ITER_SOURCE, in_iov, in_iovs, in_size); for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_folios); i++) { c = copy_folio_from_iter(ap.folios[i], 0, PAGE_SIZE, &ii); if (c != PAGE_SIZE && iov_iter_count(&ii)) goto out; } } ap.args.out_numargs = 2; ap.args.out_args[1].size = out_size; ap.args.out_pages = true; ap.args.out_argvar = true; transferred = fuse_send_ioctl(fm, &ap.args, &outarg); err = transferred; if (transferred < 0) goto out; /* did it ask for retry? */ if (outarg.flags & FUSE_IOCTL_RETRY) { void *vaddr; /* no retry if in restricted mode */ err = -EIO; if (!(flags & FUSE_IOCTL_UNRESTRICTED)) goto out; in_iovs = outarg.in_iovs; out_iovs = outarg.out_iovs; /* * Make sure things are in boundary, separate checks * are to protect against overflow. */ err = -ENOMEM; if (in_iovs > FUSE_IOCTL_MAX_IOV || out_iovs > FUSE_IOCTL_MAX_IOV || in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) goto out; vaddr = kmap_local_folio(ap.folios[0], 0); err = fuse_copy_ioctl_iovec(fm->fc, iov_page, vaddr, transferred, in_iovs + out_iovs, (flags & FUSE_IOCTL_COMPAT) != 0); kunmap_local(vaddr); if (err) goto out; in_iov = iov_page; out_iov = in_iov + in_iovs; err = fuse_verify_ioctl_iov(fm->fc, in_iov, in_iovs); if (err) goto out; err = fuse_verify_ioctl_iov(fm->fc, out_iov, out_iovs); if (err) goto out; goto retry; } err = -EIO; if (transferred > inarg.out_size) goto out; err = -EFAULT; iov_iter_init(&ii, ITER_DEST, out_iov, out_iovs, transferred); for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_folios); i++) { c = copy_folio_to_iter(ap.folios[i], 0, PAGE_SIZE, &ii); if (c != PAGE_SIZE && iov_iter_count(&ii)) goto out; } err = 0; out: free_page((unsigned long) iov_page); while (ap.num_folios) folio_put(ap.folios[--ap.num_folios]); kfree(ap.folios); return err ? 
err : outarg.result; } EXPORT_SYMBOL_GPL(fuse_do_ioctl); long fuse_ioctl_common(struct file *file, unsigned int cmd, unsigned long arg, unsigned int flags) { struct inode *inode = file_inode(file); struct fuse_conn *fc = get_fuse_conn(inode); if (!fuse_allow_current_process(fc)) return -EACCES; if (fuse_is_bad(inode)) return -EIO; return fuse_do_ioctl(file, cmd, arg, flags); } long fuse_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return fuse_ioctl_common(file, cmd, arg, 0); } long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT); } static int fuse_priv_ioctl(struct inode *inode, struct fuse_file *ff, unsigned int cmd, void *ptr, size_t size) { struct fuse_mount *fm = ff->fm; struct fuse_ioctl_in inarg; struct fuse_ioctl_out outarg; FUSE_ARGS(args); int err; memset(&inarg, 0, sizeof(inarg)); inarg.fh = ff->fh; inarg.cmd = cmd; #if BITS_PER_LONG == 32 inarg.flags |= FUSE_IOCTL_32BIT; #endif if (S_ISDIR(inode->i_mode)) inarg.flags |= FUSE_IOCTL_DIR; if (_IOC_DIR(cmd) & _IOC_READ) inarg.out_size = size; if (_IOC_DIR(cmd) & _IOC_WRITE) inarg.in_size = size; args.opcode = FUSE_IOCTL; args.nodeid = ff->nodeid; args.in_numargs = 2; args.in_args[0].size = sizeof(inarg); args.in_args[0].value = &inarg; args.in_args[1].size = inarg.in_size; args.in_args[1].value = ptr; args.out_numargs = 2; args.out_args[1].size = inarg.out_size; args.out_args[1].value = ptr; err = fuse_send_ioctl(fm, &args, &outarg); if (!err) { if (outarg.result < 0) err = outarg.result; else if (outarg.flags & FUSE_IOCTL_RETRY) err = -EIO; } return err; } static struct fuse_file *fuse_priv_ioctl_prepare(struct inode *inode) { struct fuse_mount *fm = get_fuse_mount(inode); bool isdir = S_ISDIR(inode->i_mode); if (!fuse_allow_current_process(fm->fc)) return ERR_PTR(-EACCES); if (fuse_is_bad(inode)) return ERR_PTR(-EIO); if (!S_ISREG(inode->i_mode) && !isdir) return ERR_PTR(-ENOTTY); return fuse_file_open(fm, get_node_id(inode), O_RDONLY, isdir); } static void fuse_priv_ioctl_cleanup(struct inode *inode, struct fuse_file *ff) { fuse_file_release(inode, ff, O_RDONLY, NULL, S_ISDIR(inode->i_mode)); } int fuse_fileattr_get(struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct fuse_file *ff; unsigned int flags; struct fsxattr xfa; int err; ff = fuse_priv_ioctl_prepare(inode); if (IS_ERR(ff)) return PTR_ERR(ff); if (fa->flags_valid) { err = fuse_priv_ioctl(inode, ff, FS_IOC_GETFLAGS, &flags, sizeof(flags)); if (err) goto cleanup; fileattr_fill_flags(fa, flags); } else { err = fuse_priv_ioctl(inode, ff, FS_IOC_FSGETXATTR, &xfa, sizeof(xfa)); if (err) goto cleanup; fileattr_fill_xflags(fa, xfa.fsx_xflags); fa->fsx_extsize = xfa.fsx_extsize; fa->fsx_nextents = xfa.fsx_nextents; fa->fsx_projid = xfa.fsx_projid; fa->fsx_cowextsize = xfa.fsx_cowextsize; } cleanup: fuse_priv_ioctl_cleanup(inode, ff); if (err == -ENOTTY) err = -EOPNOTSUPP; return err; } int fuse_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct file_kattr *fa) { struct inode *inode = d_inode(dentry); struct fuse_file *ff; unsigned int flags = fa->flags; struct fsxattr xfa; int err; ff = fuse_priv_ioctl_prepare(inode); if (IS_ERR(ff)) return PTR_ERR(ff); if (fa->flags_valid) { err = fuse_priv_ioctl(inode, ff, FS_IOC_SETFLAGS, &flags, sizeof(flags)); if (err) goto cleanup; } else { memset(&xfa, 0, sizeof(xfa)); xfa.fsx_xflags = fa->fsx_xflags; xfa.fsx_extsize = fa->fsx_extsize; xfa.fsx_nextents = 
fa->fsx_nextents; xfa.fsx_projid = fa->fsx_projid; xfa.fsx_cowextsize = fa->fsx_cowextsize; err = fuse_priv_ioctl(inode, ff, FS_IOC_FSSETXATTR, &xfa, sizeof(xfa)); } cleanup: fuse_priv_ioctl_cleanup(inode, ff); if (err == -ENOTTY) err = -EOPNOTSUPP; return err; } |
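A hedged illustration of the restricted-mode path described in the long comment above fuse_do_ioctl(): without FUSE_IOCTL_UNRESTRICTED, the _IOC_* macros fully determine the single iovec and the server may not ask for RETRY. The ioctl number and structure below are hypothetical:

#include <linux/types.h>
#include <linux/ioctl.h>

struct my_dev_info {
	__u64 flags;
	__u32 version;
	__u32 pad;
};
#define MY_DEV_GETINFO _IOR('M', 0x01, struct my_dev_info)

/*
 * For this cmd, _IOC_DIR(MY_DEV_GETINFO) == _IOC_READ and
 * _IOC_SIZE(MY_DEV_GETINFO) == sizeof(struct my_dev_info), so the kernel
 * builds one iovec { .iov_base = arg, .iov_len = sizeof(struct my_dev_info) }
 * used as out_iov, and a server reply carrying FUSE_IOCTL_RETRY is rejected
 * with -EIO.
 */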
| 1 1 1 2 5 5 3 2 3 2 1 2 2 3 5 2 1 1 3 1 2 2 2 2 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 | // SPDX-License-Identifier: GPL-2.0-or-later /* LRW: as defined by Cyril Guyot in * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf * * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org> * * Based on ecb.c * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> */ /* This implementation is checked against the test vectors in the above * document and by a test vector provided by Ken Buchanan at * https://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html * * The test vectors are included in the testing module tcrypt.[ch] */ #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <crypto/b128ops.h> #include <crypto/gf128mul.h> #define LRW_BLOCK_SIZE 16 struct lrw_tfm_ctx { struct crypto_skcipher *child; /* * optimizes multiplying a random (non incrementing, as at the * start of a new sector) value with key2, we could also have * used 4k optimization tables or no optimization at all. 
In the * latter case we would have to store key2 here */ struct gf128mul_64k *table; /* * stores: * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 }, * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 } * key2*{ 0,0,...1,1,1,1,1 }, etc * needed for optimized multiplication of incrementing values * with key2 */ be128 mulinc[128]; }; struct lrw_request_ctx { be128 t; struct skcipher_request subreq; }; static inline void lrw_setbit128_bbe(void *b, int bit) { __set_bit(bit ^ (0x80 - #ifdef __BIG_ENDIAN BITS_PER_LONG #else BITS_PER_BYTE #endif ), b); } static int lrw_setkey(struct crypto_skcipher *parent, const u8 *key, unsigned int keylen) { struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(parent); struct crypto_skcipher *child = ctx->child; int err, bsize = LRW_BLOCK_SIZE; const u8 *tweak = key + keylen - bsize; be128 tmp = { 0 }; int i; crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_skcipher_setkey(child, key, keylen - bsize); if (err) return err; if (ctx->table) gf128mul_free_64k(ctx->table); /* initialize multiplication table for Key2 */ ctx->table = gf128mul_init_64k_bbe((be128 *)tweak); if (!ctx->table) return -ENOMEM; /* initialize optimization table */ for (i = 0; i < 128; i++) { lrw_setbit128_bbe(&tmp, i); ctx->mulinc[i] = tmp; gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); } return 0; } /* * Returns the number of trailing '1' bits in the words of the counter, which is * represented by 4 32-bit words, arranged from least to most significant. * At the same time, increments the counter by one. * * For example: * * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 }; * int i = lrw_next_index(&counter); * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 } */ static int lrw_next_index(u32 *counter) { int i, res = 0; for (i = 0; i < 4; i++) { if (counter[i] + 1 != 0) return res + ffz(counter[i]++); counter[i] = 0; res += 32; } /* * If we get here, then x == 128 and we are incrementing the counter * from all ones to all zeros. This means we must return index 127, i.e. * the one corresponding to key2*{ 1,...,1 }. */ return 127; } /* * We compute the tweak masks twice (both before and after the ECB encryption or * decryption) to avoid having to allocate a temporary buffer and/or make * mutliple calls to the 'ecb(..)' instance, which usually would be slower than * just doing the lrw_next_index() calls again. 
*/ static int lrw_xor_tweak(struct skcipher_request *req, bool second_pass) { const int bs = LRW_BLOCK_SIZE; struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct lrw_request_ctx *rctx = skcipher_request_ctx(req); be128 t = rctx->t; struct skcipher_walk w; __be32 *iv; u32 counter[4]; int err; if (second_pass) { req = &rctx->subreq; /* set to our TFM to enforce correct alignment: */ skcipher_request_set_tfm(req, tfm); } err = skcipher_walk_virt(&w, req, false); if (err) return err; iv = (__be32 *)w.iv; counter[0] = be32_to_cpu(iv[3]); counter[1] = be32_to_cpu(iv[2]); counter[2] = be32_to_cpu(iv[1]); counter[3] = be32_to_cpu(iv[0]); while (w.nbytes) { unsigned int avail = w.nbytes; const be128 *wsrc; be128 *wdst; wsrc = w.src.virt.addr; wdst = w.dst.virt.addr; do { be128_xor(wdst++, &t, wsrc++); /* T <- I*Key2, using the optimization * discussed in the specification */ be128_xor(&t, &t, &ctx->mulinc[lrw_next_index(counter)]); } while ((avail -= bs) >= bs); if (second_pass && w.nbytes == w.total) { iv[0] = cpu_to_be32(counter[3]); iv[1] = cpu_to_be32(counter[2]); iv[2] = cpu_to_be32(counter[1]); iv[3] = cpu_to_be32(counter[0]); } err = skcipher_walk_done(&w, avail); } return err; } static int lrw_xor_tweak_pre(struct skcipher_request *req) { return lrw_xor_tweak(req, false); } static int lrw_xor_tweak_post(struct skcipher_request *req) { return lrw_xor_tweak(req, true); } static void lrw_crypt_done(void *data, int err) { struct skcipher_request *req = data; if (!err) { struct lrw_request_ctx *rctx = skcipher_request_ctx(req); rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; err = lrw_xor_tweak_post(req); } skcipher_request_complete(req, err); } static void lrw_init_crypt(struct skcipher_request *req) { const struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; skcipher_request_set_tfm(subreq, ctx->child); skcipher_request_set_callback(subreq, req->base.flags, lrw_crypt_done, req); /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */ skcipher_request_set_crypt(subreq, req->dst, req->dst, req->cryptlen, req->iv); /* calculate first value of T */ memcpy(&rctx->t, req->iv, sizeof(rctx->t)); /* T <- I*Key2 */ gf128mul_64k_bbe(&rctx->t, ctx->table); } static int lrw_encrypt(struct skcipher_request *req) { struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; lrw_init_crypt(req); return lrw_xor_tweak_pre(req) ?: crypto_skcipher_encrypt(subreq) ?: lrw_xor_tweak_post(req); } static int lrw_decrypt(struct skcipher_request *req) { struct lrw_request_ctx *rctx = skcipher_request_ctx(req); struct skcipher_request *subreq = &rctx->subreq; lrw_init_crypt(req); return lrw_xor_tweak_pre(req) ?: crypto_skcipher_decrypt(subreq) ?: lrw_xor_tweak_post(req); } static int lrw_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_instance *inst = skcipher_alg_instance(tfm); struct crypto_skcipher_spawn *spawn = skcipher_instance_ctx(inst); struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *cipher; cipher = crypto_spawn_skcipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(cipher) + sizeof(struct lrw_request_ctx)); return 0; } static void lrw_exit_tfm(struct crypto_skcipher *tfm) { struct lrw_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); 
if (ctx->table) gf128mul_free_64k(ctx->table); crypto_free_skcipher(ctx->child); } static void lrw_free_instance(struct skcipher_instance *inst) { crypto_drop_skcipher(skcipher_instance_ctx(inst)); kfree(inst); } static int lrw_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_skcipher_spawn *spawn; struct skcipher_alg_common *alg; struct skcipher_instance *inst; const char *cipher_name; char ecb_name[CRYPTO_MAX_ALG_NAME]; u32 mask; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask); if (err) return err; cipher_name = crypto_attr_alg_name(tb[1]); if (IS_ERR(cipher_name)) return PTR_ERR(cipher_name); inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); if (!inst) return -ENOMEM; spawn = skcipher_instance_ctx(inst); err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), cipher_name, 0, mask); if (err == -ENOENT && memcmp(cipher_name, "ecb(", 4)) { err = -ENAMETOOLONG; if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)", cipher_name) >= CRYPTO_MAX_ALG_NAME) goto err_free_inst; err = crypto_grab_skcipher(spawn, skcipher_crypto_instance(inst), ecb_name, 0, mask); } if (err) goto err_free_inst; alg = crypto_spawn_skcipher_alg_common(spawn); err = -EINVAL; if (alg->base.cra_blocksize != LRW_BLOCK_SIZE) goto err_free_inst; if (alg->ivsize) goto err_free_inst; err = crypto_inst_setname(skcipher_crypto_instance(inst), "lrw", &alg->base); if (err) goto err_free_inst; err = -EINVAL; cipher_name = alg->base.cra_name; /* Alas we screwed up the naming so we have to mangle the * cipher name. */ if (!memcmp(cipher_name, "ecb(", 4)) { int len; len = strscpy(ecb_name, cipher_name + 4, sizeof(ecb_name)); if (len < 2) goto err_free_inst; if (ecb_name[len - 1] != ')') goto err_free_inst; ecb_name[len - 1] = 0; if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "lrw(%s)", ecb_name) >= CRYPTO_MAX_ALG_NAME) { err = -ENAMETOOLONG; goto err_free_inst; } } else goto err_free_inst; inst->alg.base.cra_priority = alg->base.cra_priority; inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE; inst->alg.base.cra_alignmask = alg->base.cra_alignmask | (__alignof__(be128) - 1); inst->alg.ivsize = LRW_BLOCK_SIZE; inst->alg.min_keysize = alg->min_keysize + LRW_BLOCK_SIZE; inst->alg.max_keysize = alg->max_keysize + LRW_BLOCK_SIZE; inst->alg.base.cra_ctxsize = sizeof(struct lrw_tfm_ctx); inst->alg.init = lrw_init_tfm; inst->alg.exit = lrw_exit_tfm; inst->alg.setkey = lrw_setkey; inst->alg.encrypt = lrw_encrypt; inst->alg.decrypt = lrw_decrypt; inst->free = lrw_free_instance; err = skcipher_register_instance(tmpl, inst); if (err) { err_free_inst: lrw_free_instance(inst); } return err; } static struct crypto_template lrw_tmpl = { .name = "lrw", .create = lrw_create, .module = THIS_MODULE, }; static int __init lrw_module_init(void) { return crypto_register_template(&lrw_tmpl); } static void __exit lrw_module_exit(void) { crypto_unregister_template(&lrw_tmpl); } module_init(lrw_module_init); module_exit(lrw_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode"); MODULE_ALIAS_CRYPTO("lrw"); MODULE_SOFTDEP("pre: ecb"); |
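A hedged usage sketch showing how another piece of kernel code might instantiate this template; "lrw(aes)" is resolved through the ecb-name handling in lrw_create(), and the key layout (cipher key followed by the 16-byte tweak key, Key2) follows the min/max_keysize arithmetic there. Error handling is trimmed and the all-zero key is for illustration only:

#include <linux/err.h>
#include <crypto/skcipher.h>

static int demo_lrw_alloc(void)
{
	struct crypto_skcipher *tfm;
	u8 key[32 + 16] = { };   /* 32-byte AES-256 key + 16-byte tweak key */
	int err;

	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	crypto_free_skcipher(tfm);
	return err;
}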
903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 | // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. * * HID driver for NVIDIA SHIELD peripherals. */ #include <linux/hid.h> #include <linux/idr.h> #include <linux/input-event-codes.h> #include <linux/input.h> #include <linux/jiffies.h> #include <linux/leds.h> #include <linux/module.h> #include <linux/power_supply.h> #include <linux/spinlock.h> #include <linux/timer.h> #include <linux/workqueue.h> #include "hid-ids.h" #define NOT_INIT_STR "NOT INITIALIZED" #define android_map_key(c) hid_map_usage(hi, usage, bit, max, EV_KEY, (c)) enum { HID_USAGE_ANDROID_PLAYPAUSE_BTN = 0xcd, /* Double-tap volume slider */ HID_USAGE_ANDROID_VOLUMEUP_BTN = 0xe9, HID_USAGE_ANDROID_VOLUMEDOWN_BTN = 0xea, HID_USAGE_ANDROID_SEARCH_BTN = 0x221, /* NVIDIA btn on Thunderstrike */ HID_USAGE_ANDROID_HOME_BTN = 0x223, HID_USAGE_ANDROID_BACK_BTN = 0x224, }; enum { SHIELD_FW_VERSION_INITIALIZED = 0, SHIELD_BOARD_INFO_INITIALIZED, SHIELD_BATTERY_STATS_INITIALIZED, SHIELD_CHARGER_STATE_INITIALIZED, }; enum { THUNDERSTRIKE_FW_VERSION_UPDATE = 0, THUNDERSTRIKE_BOARD_INFO_UPDATE, THUNDERSTRIKE_HAPTICS_UPDATE, THUNDERSTRIKE_LED_UPDATE, THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, }; enum { THUNDERSTRIKE_HOSTCMD_REPORT_SIZE = 33, THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID = 0x4, THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID = 0x3, }; enum { THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION = 1, THUNDERSTRIKE_HOSTCMD_ID_LED = 6, THUNDERSTRIKE_HOSTCMD_ID_BATTERY, THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO = 16, THUNDERSTRIKE_HOSTCMD_ID_USB_INIT = 53, THUNDERSTRIKE_HOSTCMD_ID_HAPTICS = 57, THUNDERSTRIKE_HOSTCMD_ID_CHARGER, }; struct power_supply_dev { struct power_supply *psy; struct power_supply_desc desc; }; struct thunderstrike_psy_prop_values { int voltage_min; int voltage_now; int voltage_avg; int voltage_boot; int capacity; int status; int charge_type; int temp; }; static const enum power_supply_property thunderstrike_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_MIN, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_VOLTAGE_BOOT, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_SCOPE, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_MIN, POWER_SUPPLY_PROP_TEMP_MAX, POWER_SUPPLY_PROP_TEMP_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, }; enum thunderstrike_led_state { 
THUNDERSTRIKE_LED_OFF = 1, THUNDERSTRIKE_LED_ON = 8, } __packed; static_assert(sizeof(enum thunderstrike_led_state) == 1); struct thunderstrike_hostcmd_battery { __le16 voltage_avg; u8 reserved_at_10; __le16 thermistor; __le16 voltage_min; __le16 voltage_boot; __le16 voltage_now; u8 capacity; } __packed; enum thunderstrike_charger_type { THUNDERSTRIKE_CHARGER_TYPE_NONE = 0, THUNDERSTRIKE_CHARGER_TYPE_TRICKLE, THUNDERSTRIKE_CHARGER_TYPE_NORMAL, } __packed; static_assert(sizeof(enum thunderstrike_charger_type) == 1); enum thunderstrike_charger_state { THUNDERSTRIKE_CHARGER_STATE_UNKNOWN = 0, THUNDERSTRIKE_CHARGER_STATE_DISABLED, THUNDERSTRIKE_CHARGER_STATE_CHARGING, THUNDERSTRIKE_CHARGER_STATE_FULL, THUNDERSTRIKE_CHARGER_STATE_FAILED = 8, } __packed; static_assert(sizeof(enum thunderstrike_charger_state) == 1); struct thunderstrike_hostcmd_charger { u8 connected; enum thunderstrike_charger_type type; enum thunderstrike_charger_state state; } __packed; struct thunderstrike_hostcmd_board_info { __le16 revision; __le16 serial[7]; } __packed; struct thunderstrike_hostcmd_haptics { u8 motor_left; u8 motor_right; } __packed; struct thunderstrike_hostcmd_resp_report { u8 report_id; /* THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID */ u8 cmd_id; u8 reserved_at_10; union { struct thunderstrike_hostcmd_board_info board_info; struct thunderstrike_hostcmd_haptics motors; __le16 fw_version; enum thunderstrike_led_state led_state; struct thunderstrike_hostcmd_battery battery; struct thunderstrike_hostcmd_charger charger; u8 payload[30]; } __packed; } __packed; static_assert(sizeof(struct thunderstrike_hostcmd_resp_report) == THUNDERSTRIKE_HOSTCMD_REPORT_SIZE); struct thunderstrike_hostcmd_req_report { u8 report_id; /* THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID */ u8 cmd_id; u8 reserved_at_10; union { struct __packed { u8 update; enum thunderstrike_led_state state; } led; struct __packed { u8 update; struct thunderstrike_hostcmd_haptics motors; } haptics; } __packed; u8 reserved_at_30[27]; } __packed; static_assert(sizeof(struct thunderstrike_hostcmd_req_report) == THUNDERSTRIKE_HOSTCMD_REPORT_SIZE); /* Common struct for shield accessories. */ struct shield_device { struct hid_device *hdev; struct power_supply_dev battery_dev; unsigned long initialized_flags; const char *codename; u16 fw_version; struct { u16 revision; char serial_number[15]; } board_info; }; /* * Non-trivial to uniquely identify Thunderstrike controllers at initialization * time. Use an ID allocator to help with this. 
*/ static DEFINE_IDA(thunderstrike_ida); struct thunderstrike { struct shield_device base; int id; /* Sub-devices */ struct input_dev *haptics_dev; struct led_classdev led_dev; /* Resources */ void *req_report_dmabuf; unsigned long update_flags; struct thunderstrike_hostcmd_haptics haptics_val; spinlock_t haptics_update_lock; u8 led_state : 1; enum thunderstrike_led_state led_value; struct thunderstrike_psy_prop_values psy_stats; spinlock_t psy_stats_lock; struct timer_list psy_stats_timer; struct work_struct hostcmd_req_work; }; static inline void thunderstrike_hostcmd_req_report_init( struct thunderstrike_hostcmd_req_report *report, u8 cmd_id) { memset(report, 0, sizeof(*report)); report->report_id = THUNDERSTRIKE_HOSTCMD_REQ_REPORT_ID; report->cmd_id = cmd_id; } static inline void shield_strrev(char *dest, size_t len, u16 rev) { dest[0] = ('A' - 1) + (rev >> 8); snprintf(&dest[1], len - 1, "%02X", 0xff & rev); } static struct input_dev *shield_allocate_input_dev(struct hid_device *hdev, const char *name_suffix) { struct input_dev *idev; idev = input_allocate_device(); if (!idev) goto err_device; idev->id.bustype = hdev->bus; idev->id.vendor = hdev->vendor; idev->id.product = hdev->product; idev->id.version = hdev->version; idev->uniq = hdev->uniq; idev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, name_suffix); if (!idev->name) goto err_name; input_set_drvdata(idev, hdev); return idev; err_name: input_free_device(idev); err_device: return ERR_PTR(-ENOMEM); } static struct input_dev *shield_haptics_create( struct shield_device *dev, int (*play_effect)(struct input_dev *, void *, struct ff_effect *)) { struct input_dev *haptics; int ret; if (!IS_ENABLED(CONFIG_NVIDIA_SHIELD_FF)) return NULL; haptics = shield_allocate_input_dev(dev->hdev, "Haptics"); if (IS_ERR(haptics)) return haptics; input_set_capability(haptics, EV_FF, FF_RUMBLE); ret = input_ff_create_memless(haptics, NULL, play_effect); if (ret) goto err; ret = input_register_device(haptics); if (ret) goto err; return haptics; err: input_free_device(haptics); return ERR_PTR(ret); } static inline void thunderstrike_send_hostcmd_request(struct thunderstrike *ts) { struct thunderstrike_hostcmd_req_report *report = ts->req_report_dmabuf; struct shield_device *shield_dev = &ts->base; int ret; ret = hid_hw_raw_request(shield_dev->hdev, report->report_id, ts->req_report_dmabuf, THUNDERSTRIKE_HOSTCMD_REPORT_SIZE, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); if (ret < 0) { hid_err(shield_dev->hdev, "Failed to output Thunderstrike HOSTCMD request HID report due to %pe\n", ERR_PTR(ret)); } } static void thunderstrike_hostcmd_req_work_handler(struct work_struct *work) { struct thunderstrike *ts = container_of(work, struct thunderstrike, hostcmd_req_work); struct thunderstrike_hostcmd_req_report *report; unsigned long flags; report = ts->req_report_dmabuf; if (test_and_clear_bit(THUNDERSTRIKE_FW_VERSION_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_LED_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init(report, THUNDERSTRIKE_HOSTCMD_ID_LED); report->led.update = 1; report->led.state = ts->led_value; thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_BATTERY); thunderstrike_send_hostcmd_request(ts); 
thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_CHARGER); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_BOARD_INFO_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO); thunderstrike_send_hostcmd_request(ts); } if (test_and_clear_bit(THUNDERSTRIKE_HAPTICS_UPDATE, &ts->update_flags)) { thunderstrike_hostcmd_req_report_init( report, THUNDERSTRIKE_HOSTCMD_ID_HAPTICS); report->haptics.update = 1; spin_lock_irqsave(&ts->haptics_update_lock, flags); report->haptics.motors = ts->haptics_val; spin_unlock_irqrestore(&ts->haptics_update_lock, flags); thunderstrike_send_hostcmd_request(ts); } } static inline void thunderstrike_request_firmware_version(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_FW_VERSION_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static inline void thunderstrike_request_board_info(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_BOARD_INFO_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static inline int thunderstrike_update_haptics(struct thunderstrike *ts, struct thunderstrike_hostcmd_haptics *motors) { unsigned long flags; spin_lock_irqsave(&ts->haptics_update_lock, flags); ts->haptics_val = *motors; spin_unlock_irqrestore(&ts->haptics_update_lock, flags); set_bit(THUNDERSTRIKE_HAPTICS_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); return 0; } static int thunderstrike_play_effect(struct input_dev *idev, void *data, struct ff_effect *effect) { struct hid_device *hdev = input_get_drvdata(idev); struct thunderstrike_hostcmd_haptics motors; struct shield_device *shield_dev; struct thunderstrike *ts; if (effect->type != FF_RUMBLE) return 0; shield_dev = hid_get_drvdata(hdev); ts = container_of(shield_dev, struct thunderstrike, base); /* Thunderstrike motor values range from 0 to 32 inclusively */ motors.motor_left = effect->u.rumble.strong_magnitude / 2047; motors.motor_right = effect->u.rumble.weak_magnitude / 2047; hid_dbg(hdev, "Thunderstrike FF_RUMBLE request, left: %u right: %u\n", motors.motor_left, motors.motor_right); return thunderstrike_update_haptics(ts, &motors); } static enum led_brightness thunderstrike_led_get_brightness(struct led_classdev *led) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct shield_device *shield_dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(shield_dev, struct thunderstrike, base); return ts->led_state; } static void thunderstrike_led_set_brightness(struct led_classdev *led, enum led_brightness value) { struct hid_device *hdev = to_hid_device(led->dev->parent); struct shield_device *shield_dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(shield_dev, struct thunderstrike, base); switch (value) { case LED_OFF: ts->led_value = THUNDERSTRIKE_LED_OFF; break; default: ts->led_value = THUNDERSTRIKE_LED_ON; break; } set_bit(THUNDERSTRIKE_LED_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static int thunderstrike_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct shield_device *shield_dev = power_supply_get_drvdata(psy); struct thunderstrike_psy_prop_values prop_values; struct thunderstrike *ts; int ret = 0; ts = container_of(shield_dev, struct thunderstrike, base); spin_lock(&ts->psy_stats_lock); prop_values = ts->psy_stats; spin_unlock(&ts->psy_stats_lock); switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval 
= prop_values.status; break; case POWER_SUPPLY_PROP_CHARGE_TYPE: val->intval = prop_values.charge_type; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = 1; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN: val->intval = prop_values.voltage_min; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: val->intval = 2900000; /* 2.9 V */ break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = 2200000; /* 2.2 V */ break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = prop_values.voltage_now; break; case POWER_SUPPLY_PROP_VOLTAGE_AVG: val->intval = prop_values.voltage_avg; break; case POWER_SUPPLY_PROP_VOLTAGE_BOOT: val->intval = prop_values.voltage_boot; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = prop_values.capacity; break; case POWER_SUPPLY_PROP_SCOPE: val->intval = POWER_SUPPLY_SCOPE_DEVICE; break; case POWER_SUPPLY_PROP_TEMP: val->intval = prop_values.temp; break; case POWER_SUPPLY_PROP_TEMP_MIN: val->intval = 0; /* 0 C */ break; case POWER_SUPPLY_PROP_TEMP_MAX: val->intval = 400; /* 40 C */ break; case POWER_SUPPLY_PROP_TEMP_ALERT_MIN: val->intval = 15; /* 1.5 C */ break; case POWER_SUPPLY_PROP_TEMP_ALERT_MAX: val->intval = 380; /* 38 C */ break; default: ret = -EINVAL; break; } return ret; } static inline void thunderstrike_request_psy_stats(struct thunderstrike *ts) { set_bit(THUNDERSTRIKE_POWER_SUPPLY_STATS_UPDATE, &ts->update_flags); schedule_work(&ts->hostcmd_req_work); } static void thunderstrike_psy_stats_timer_handler(struct timer_list *timer) { struct thunderstrike *ts = container_of(timer, struct thunderstrike, psy_stats_timer); thunderstrike_request_psy_stats(ts); /* Query battery statistics from device every five minutes */ mod_timer(timer, jiffies + 300 * HZ); } static void thunderstrike_parse_fw_version_payload(struct shield_device *shield_dev, __le16 fw_version) { shield_dev->fw_version = le16_to_cpu(fw_version); set_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(shield_dev->hdev, "Thunderstrike firmware version 0x%04X\n", shield_dev->fw_version); } static void thunderstrike_parse_board_info_payload(struct shield_device *shield_dev, struct thunderstrike_hostcmd_board_info *board_info) { char board_revision_str[4]; int i; shield_dev->board_info.revision = le16_to_cpu(board_info->revision); for (i = 0; i < 7; ++i) { u16 val = le16_to_cpu(board_info->serial[i]); shield_dev->board_info.serial_number[2 * i] = val & 0xFF; shield_dev->board_info.serial_number[2 * i + 1] = val >> 8; } shield_dev->board_info.serial_number[14] = '\0'; set_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags); shield_strrev(board_revision_str, 4, shield_dev->board_info.revision); hid_dbg(shield_dev->hdev, "Thunderstrike BOARD_REVISION_%s (0x%04X) S/N: %s\n", board_revision_str, shield_dev->board_info.revision, shield_dev->board_info.serial_number); } static inline void thunderstrike_parse_haptics_payload(struct shield_device *shield_dev, struct thunderstrike_hostcmd_haptics *haptics) { hid_dbg(shield_dev->hdev, "Thunderstrike haptics HOSTCMD response, left: %u right: %u\n", haptics->motor_left, haptics->motor_right); } static void thunderstrike_parse_led_payload(struct shield_device *shield_dev, enum thunderstrike_led_state led_state) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); switch (led_state) { case THUNDERSTRIKE_LED_OFF: ts->led_state = 0; break; case THUNDERSTRIKE_LED_ON: ts->led_state = 1; break; } hid_dbg(shield_dev->hdev, "Thunderstrike led HOSTCMD response, 0x%02X\n", led_state); } static void 
thunderstrike_parse_battery_payload( struct shield_device *shield_dev, struct thunderstrike_hostcmd_battery *battery) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); u16 hostcmd_voltage_boot = le16_to_cpu(battery->voltage_boot); u16 hostcmd_voltage_avg = le16_to_cpu(battery->voltage_avg); u16 hostcmd_voltage_min = le16_to_cpu(battery->voltage_min); u16 hostcmd_voltage_now = le16_to_cpu(battery->voltage_now); u16 hostcmd_thermistor = le16_to_cpu(battery->thermistor); int voltage_boot, voltage_avg, voltage_min, voltage_now; struct hid_device *hdev = shield_dev->hdev; u8 capacity = battery->capacity; int temp; /* Convert thunderstrike device values to µV and tenths of degree Celsius */ voltage_boot = hostcmd_voltage_boot * 1000; voltage_avg = hostcmd_voltage_avg * 1000; voltage_min = hostcmd_voltage_min * 1000; voltage_now = hostcmd_voltage_now * 1000; temp = (1378 - (int)hostcmd_thermistor) * 10 / 19; /* Copy converted values */ spin_lock(&ts->psy_stats_lock); ts->psy_stats.voltage_boot = voltage_boot; ts->psy_stats.voltage_avg = voltage_avg; ts->psy_stats.voltage_min = voltage_min; ts->psy_stats.voltage_now = voltage_now; ts->psy_stats.capacity = capacity; ts->psy_stats.temp = temp; spin_unlock(&ts->psy_stats_lock); set_bit(SHIELD_BATTERY_STATS_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, voltage_avg: %u voltage_now: %u\n", hostcmd_voltage_avg, hostcmd_voltage_now); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, voltage_boot: %u voltage_min: %u\n", hostcmd_voltage_boot, hostcmd_voltage_min); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, thermistor: %u\n", hostcmd_thermistor); hid_dbg(hdev, "Thunderstrike battery HOSTCMD response, capacity: %u%%\n", capacity); } static void thunderstrike_parse_charger_payload( struct shield_device *shield_dev, struct thunderstrike_hostcmd_charger *charger) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); int charge_type = POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; struct hid_device *hdev = shield_dev->hdev; int status = POWER_SUPPLY_STATUS_UNKNOWN; switch (charger->type) { case THUNDERSTRIKE_CHARGER_TYPE_NONE: charge_type = POWER_SUPPLY_CHARGE_TYPE_NONE; break; case THUNDERSTRIKE_CHARGER_TYPE_TRICKLE: charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; break; case THUNDERSTRIKE_CHARGER_TYPE_NORMAL: charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; break; default: hid_warn(hdev, "Unhandled Thunderstrike charger HOSTCMD type, %u\n", charger->type); break; } switch (charger->state) { case THUNDERSTRIKE_CHARGER_STATE_UNKNOWN: status = POWER_SUPPLY_STATUS_UNKNOWN; break; case THUNDERSTRIKE_CHARGER_STATE_DISABLED: /* Indicates charger is disconnected */ break; case THUNDERSTRIKE_CHARGER_STATE_CHARGING: status = POWER_SUPPLY_STATUS_CHARGING; break; case THUNDERSTRIKE_CHARGER_STATE_FULL: status = POWER_SUPPLY_STATUS_FULL; break; case THUNDERSTRIKE_CHARGER_STATE_FAILED: status = POWER_SUPPLY_STATUS_NOT_CHARGING; hid_err(hdev, "Thunderstrike device failed to charge\n"); break; default: hid_warn(hdev, "Unhandled Thunderstrike charger HOSTCMD state, %u\n", charger->state); break; } if (!charger->connected) status = POWER_SUPPLY_STATUS_DISCHARGING; spin_lock(&ts->psy_stats_lock); ts->psy_stats.charge_type = charge_type; ts->psy_stats.status = status; spin_unlock(&ts->psy_stats_lock); set_bit(SHIELD_CHARGER_STATE_INITIALIZED, &shield_dev->initialized_flags); hid_dbg(hdev, "Thunderstrike charger HOSTCMD response, connected: %u, type: %u, state: 
%u\n", charger->connected, charger->type, charger->state); } static inline void thunderstrike_device_init_info(struct shield_device *shield_dev) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); if (!test_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_request_firmware_version(ts); if (!test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_request_board_info(ts); if (!test_bit(SHIELD_BATTERY_STATS_INITIALIZED, &shield_dev->initialized_flags) || !test_bit(SHIELD_CHARGER_STATE_INITIALIZED, &shield_dev->initialized_flags)) thunderstrike_psy_stats_timer_handler(&ts->psy_stats_timer); } static int thunderstrike_parse_report(struct shield_device *shield_dev, struct hid_report *report, u8 *data, int size) { struct thunderstrike_hostcmd_resp_report *hostcmd_resp_report; struct hid_device *hdev = shield_dev->hdev; switch (report->id) { case THUNDERSTRIKE_HOSTCMD_RESP_REPORT_ID: if (size != THUNDERSTRIKE_HOSTCMD_REPORT_SIZE) { hid_err(hdev, "Encountered Thunderstrike HOSTCMD HID report with unexpected size %d\n", size); return -EINVAL; } hostcmd_resp_report = (struct thunderstrike_hostcmd_resp_report *)data; switch (hostcmd_resp_report->cmd_id) { case THUNDERSTRIKE_HOSTCMD_ID_FW_VERSION: thunderstrike_parse_fw_version_payload( shield_dev, hostcmd_resp_report->fw_version); break; case THUNDERSTRIKE_HOSTCMD_ID_LED: thunderstrike_parse_led_payload(shield_dev, hostcmd_resp_report->led_state); break; case THUNDERSTRIKE_HOSTCMD_ID_BATTERY: thunderstrike_parse_battery_payload(shield_dev, &hostcmd_resp_report->battery); break; case THUNDERSTRIKE_HOSTCMD_ID_BOARD_INFO: thunderstrike_parse_board_info_payload( shield_dev, &hostcmd_resp_report->board_info); break; case THUNDERSTRIKE_HOSTCMD_ID_HAPTICS: thunderstrike_parse_haptics_payload( shield_dev, &hostcmd_resp_report->motors); break; case THUNDERSTRIKE_HOSTCMD_ID_USB_INIT: /* May block HOSTCMD requests till received initially */ thunderstrike_device_init_info(shield_dev); break; case THUNDERSTRIKE_HOSTCMD_ID_CHARGER: /* May block HOSTCMD requests till received initially */ thunderstrike_device_init_info(shield_dev); thunderstrike_parse_charger_payload( shield_dev, &hostcmd_resp_report->charger); break; default: hid_warn(hdev, "Unhandled Thunderstrike HOSTCMD id %d\n", hostcmd_resp_report->cmd_id); return -ENOENT; } break; default: return 0; } return 0; } static inline int thunderstrike_led_create(struct thunderstrike *ts) { struct led_classdev *led = &ts->led_dev; led->name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike%d:blue:led", ts->id); if (!led->name) return -ENOMEM; led->max_brightness = 1; led->flags = LED_CORE_SUSPENDRESUME | LED_RETAIN_AT_SHUTDOWN; led->brightness_get = &thunderstrike_led_get_brightness; led->brightness_set = &thunderstrike_led_set_brightness; return led_classdev_register(&ts->base.hdev->dev, led); } static inline int thunderstrike_psy_create(struct shield_device *shield_dev) { struct thunderstrike *ts = container_of(shield_dev, struct thunderstrike, base); struct power_supply_config psy_cfg = { .drv_data = shield_dev, }; struct hid_device *hdev = shield_dev->hdev; int ret; /* * Set an initial capacity and temperature value to avoid prematurely * triggering alerts. Will be replaced by values queried from initial * HOSTCMD requests. 
*/ ts->psy_stats.capacity = 100; ts->psy_stats.temp = 182; shield_dev->battery_dev.desc.properties = thunderstrike_battery_props; shield_dev->battery_dev.desc.num_properties = ARRAY_SIZE(thunderstrike_battery_props); shield_dev->battery_dev.desc.get_property = thunderstrike_battery_get_property; shield_dev->battery_dev.desc.type = POWER_SUPPLY_TYPE_BATTERY; shield_dev->battery_dev.desc.name = devm_kasprintf(&ts->base.hdev->dev, GFP_KERNEL, "thunderstrike_%d", ts->id); if (!shield_dev->battery_dev.desc.name) return -ENOMEM; shield_dev->battery_dev.psy = power_supply_register( &hdev->dev, &shield_dev->battery_dev.desc, &psy_cfg); if (IS_ERR(shield_dev->battery_dev.psy)) { hid_err(hdev, "Failed to register Thunderstrike battery device\n"); return PTR_ERR(shield_dev->battery_dev.psy); } ret = power_supply_powers(shield_dev->battery_dev.psy, &hdev->dev); if (ret) { hid_err(hdev, "Failed to associate battery device to Thunderstrike\n"); goto err; } return 0; err: power_supply_unregister(shield_dev->battery_dev.psy); return ret; } static struct shield_device *thunderstrike_create(struct hid_device *hdev) { struct shield_device *shield_dev; struct thunderstrike *ts; int ret; ts = devm_kzalloc(&hdev->dev, sizeof(*ts), GFP_KERNEL); if (!ts) return ERR_PTR(-ENOMEM); ts->req_report_dmabuf = devm_kzalloc( &hdev->dev, THUNDERSTRIKE_HOSTCMD_REPORT_SIZE, GFP_KERNEL); if (!ts->req_report_dmabuf) return ERR_PTR(-ENOMEM); shield_dev = &ts->base; shield_dev->hdev = hdev; shield_dev->codename = "Thunderstrike"; spin_lock_init(&ts->haptics_update_lock); spin_lock_init(&ts->psy_stats_lock); INIT_WORK(&ts->hostcmd_req_work, thunderstrike_hostcmd_req_work_handler); hid_set_drvdata(hdev, shield_dev); ts->id = ida_alloc(&thunderstrike_ida, GFP_KERNEL); if (ts->id < 0) return ERR_PTR(ts->id); ts->haptics_dev = shield_haptics_create(shield_dev, thunderstrike_play_effect); if (IS_ERR(ts->haptics_dev)) { hid_err(hdev, "Failed to create Thunderstrike haptics instance\n"); ret = PTR_ERR(ts->haptics_dev); goto err_id; } ret = thunderstrike_psy_create(shield_dev); if (ret) { hid_err(hdev, "Failed to create Thunderstrike power supply instance\n"); goto err_haptics; } ret = thunderstrike_led_create(ts); if (ret) { hid_err(hdev, "Failed to create Thunderstrike LED instance\n"); goto err_psy; } timer_setup(&ts->psy_stats_timer, thunderstrike_psy_stats_timer_handler, 0); hid_info(hdev, "Registered Thunderstrike controller\n"); return shield_dev; err_psy: power_supply_unregister(shield_dev->battery_dev.psy); err_haptics: if (ts->haptics_dev) input_unregister_device(ts->haptics_dev); err_id: ida_free(&thunderstrike_ida, ts->id); return ERR_PTR(ret); } static void thunderstrike_destroy(struct thunderstrike *ts) { led_classdev_unregister(&ts->led_dev); power_supply_unregister(ts->base.battery_dev.psy); if (ts->haptics_dev) input_unregister_device(ts->haptics_dev); ida_free(&thunderstrike_ida, ts->id); } static int android_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case HID_USAGE_ANDROID_PLAYPAUSE_BTN: android_map_key(KEY_PLAYPAUSE); break; case HID_USAGE_ANDROID_VOLUMEUP_BTN: android_map_key(KEY_VOLUMEUP); break; case HID_USAGE_ANDROID_VOLUMEDOWN_BTN: android_map_key(KEY_VOLUMEDOWN); break; case HID_USAGE_ANDROID_SEARCH_BTN: android_map_key(BTN_Z); break; case HID_USAGE_ANDROID_HOME_BTN: android_map_key(BTN_MODE); break; case 
HID_USAGE_ANDROID_BACK_BTN: android_map_key(BTN_SELECT); break; default: return 0; } return 1; } static ssize_t firmware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_FW_VERSION_INITIALIZED, &shield_dev->initialized_flags)) ret = sysfs_emit(buf, "0x%04X\n", shield_dev->fw_version); else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(firmware_version); static ssize_t hardware_version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; char board_revision_str[4]; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) { shield_strrev(board_revision_str, 4, shield_dev->board_info.revision); ret = sysfs_emit(buf, "%s BOARD_REVISION_%s (0x%04X)\n", shield_dev->codename, board_revision_str, shield_dev->board_info.revision); } else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(hardware_version); static ssize_t serial_number_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); struct shield_device *shield_dev; int ret; shield_dev = hid_get_drvdata(hdev); if (test_bit(SHIELD_BOARD_INFO_INITIALIZED, &shield_dev->initialized_flags)) ret = sysfs_emit(buf, "%s\n", shield_dev->board_info.serial_number); else ret = sysfs_emit(buf, NOT_INIT_STR "\n"); return ret; } static DEVICE_ATTR_RO(serial_number); static struct attribute *shield_device_attrs[] = { &dev_attr_firmware_version.attr, &dev_attr_hardware_version.attr, &dev_attr_serial_number.attr, NULL, }; ATTRIBUTE_GROUPS(shield_device); static int shield_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct shield_device *dev = hid_get_drvdata(hdev); return thunderstrike_parse_report(dev, report, data, size); } static int shield_probe(struct hid_device *hdev, const struct hid_device_id *id) { struct shield_device *shield_dev = NULL; struct thunderstrike *ts; int ret; ret = hid_parse(hdev); if (ret) { hid_err(hdev, "Parse failed\n"); return ret; } switch (id->product) { case USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER: shield_dev = thunderstrike_create(hdev); break; } if (unlikely(!shield_dev)) { hid_err(hdev, "Failed to identify SHIELD device\n"); return -ENODEV; } if (IS_ERR(shield_dev)) { hid_err(hdev, "Failed to create SHIELD device\n"); return PTR_ERR(shield_dev); } ts = container_of(shield_dev, struct thunderstrike, base); ret = hid_hw_start(hdev, HID_CONNECT_HIDINPUT); if (ret) { hid_err(hdev, "Failed to start HID device\n"); goto err_ts_create; } ret = hid_hw_open(hdev); if (ret) { hid_err(hdev, "Failed to open HID device\n"); goto err_stop; } thunderstrike_device_init_info(shield_dev); return ret; err_stop: hid_hw_stop(hdev); err_ts_create: thunderstrike_destroy(ts); return ret; } static void shield_remove(struct hid_device *hdev) { struct shield_device *dev = hid_get_drvdata(hdev); struct thunderstrike *ts; ts = container_of(dev, struct thunderstrike, base); hid_hw_close(hdev); thunderstrike_destroy(ts); timer_delete_sync(&ts->psy_stats_timer); cancel_work_sync(&ts->hostcmd_req_work); hid_hw_stop(hdev); } static const struct hid_device_id shield_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NVIDIA, USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER) }, 
{ HID_USB_DEVICE(USB_VENDOR_ID_NVIDIA, USB_DEVICE_ID_NVIDIA_THUNDERSTRIKE_CONTROLLER) }, { } }; MODULE_DEVICE_TABLE(hid, shield_devices); static struct hid_driver shield_driver = { .name = "shield", .id_table = shield_devices, .input_mapping = android_input_mapping, .probe = shield_probe, .remove = shield_remove, .raw_event = shield_raw_event, .driver = { .dev_groups = shield_device_groups, }, }; module_hid_driver(shield_driver); MODULE_AUTHOR("Rahul Rameshbabu <rrameshbabu@nvidia.com>"); MODULE_DESCRIPTION("HID Driver for NVIDIA SHIELD peripherals."); MODULE_LICENSE("GPL"); |
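/*
 * Illustrative userspace sketch (not part of the driver above): it reads the
 * firmware_version, hardware_version and serial_number attributes that the
 * driver registers through shield_device_groups in sysfs. The attribute names
 * come from the driver; the device directory used below is only a placeholder
 * assumption, since the real path depends on how the controller enumerates on
 * a given system (see /sys/bus/hid/devices/).
 */
#include <stdio.h>

static void print_attr(const char *dev_path, const char *attr)
{
	char path[256];
	char buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dev_path, attr);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", attr, buf);	/* sysfs value already ends in '\n' */
	fclose(f);
}

int main(void)
{
	/* Placeholder path; substitute the real HID device directory. */
	const char *dev = "/sys/bus/hid/devices/0005:0955:7214.0001";

	print_attr(dev, "firmware_version");
	print_attr(dev, "hardware_version");
	print_attr(dev, "serial_number");
	return 0;
}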
| 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 184 184 128 126 12 12 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 | // SPDX-License-Identifier: GPL-2.0-or-later /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * This file implements the various access functions for the * PROC file system. This is very similar to the IPv4 version, * except it reports the sockets in the INET6 address family. * * Authors: David S. Miller (davem@caip.rutgers.edu) * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> */ #include <linux/socket.h> #include <linux/net.h> #include <linux/ipv6.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stddef.h> #include <linux/export.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/sock.h> #include <net/tcp.h> #include <net/udp.h> #include <net/transp_v6.h> #include <net/ipv6.h> #define MAX4(a, b, c, d) \ MAX_T(u32, MAX_T(u32, a, b), MAX_T(u32, c, d)) #define SNMP_MIB_MAX MAX4(UDP_MIB_MAX, TCP_MIB_MAX, \ IPSTATS_MIB_MAX, ICMP_MIB_MAX) static int sockstat6_seq_show(struct seq_file *seq, void *v) { struct net *net = seq->private; seq_printf(seq, "TCP6: inuse %d\n", sock_prot_inuse_get(net, &tcpv6_prot)); seq_printf(seq, "UDP6: inuse %d\n", sock_prot_inuse_get(net, &udpv6_prot)); seq_printf(seq, "UDPLITE6: inuse %d\n", sock_prot_inuse_get(net, &udplitev6_prot)); seq_printf(seq, "RAW6: inuse %d\n", sock_prot_inuse_get(net, &rawv6_prot)); seq_printf(seq, "FRAG6: inuse %u memory %lu\n", atomic_read(&net->ipv6.fqdir->rhashtable.nelems), frag_mem_limit(net->ipv6.fqdir)); return 0; } static const struct snmp_mib snmp6_ipstats_list[] = { /* ipv6 mib according to RFC 2465 */ SNMP_MIB_ITEM("Ip6InReceives", IPSTATS_MIB_INPKTS), SNMP_MIB_ITEM("Ip6InHdrErrors", IPSTATS_MIB_INHDRERRORS), SNMP_MIB_ITEM("Ip6InTooBigErrors", IPSTATS_MIB_INTOOBIGERRORS), SNMP_MIB_ITEM("Ip6InNoRoutes", IPSTATS_MIB_INNOROUTES), SNMP_MIB_ITEM("Ip6InAddrErrors", IPSTATS_MIB_INADDRERRORS), SNMP_MIB_ITEM("Ip6InUnknownProtos", IPSTATS_MIB_INUNKNOWNPROTOS), SNMP_MIB_ITEM("Ip6InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS), SNMP_MIB_ITEM("Ip6InDiscards", IPSTATS_MIB_INDISCARDS), SNMP_MIB_ITEM("Ip6InDelivers", IPSTATS_MIB_INDELIVERS), SNMP_MIB_ITEM("Ip6OutForwDatagrams", IPSTATS_MIB_OUTFORWDATAGRAMS), SNMP_MIB_ITEM("Ip6OutRequests", 
IPSTATS_MIB_OUTREQUESTS), SNMP_MIB_ITEM("Ip6OutDiscards", IPSTATS_MIB_OUTDISCARDS), SNMP_MIB_ITEM("Ip6OutNoRoutes", IPSTATS_MIB_OUTNOROUTES), SNMP_MIB_ITEM("Ip6ReasmTimeout", IPSTATS_MIB_REASMTIMEOUT), SNMP_MIB_ITEM("Ip6ReasmReqds", IPSTATS_MIB_REASMREQDS), SNMP_MIB_ITEM("Ip6ReasmOKs", IPSTATS_MIB_REASMOKS), SNMP_MIB_ITEM("Ip6ReasmFails", IPSTATS_MIB_REASMFAILS), SNMP_MIB_ITEM("Ip6FragOKs", IPSTATS_MIB_FRAGOKS), SNMP_MIB_ITEM("Ip6FragFails", IPSTATS_MIB_FRAGFAILS), SNMP_MIB_ITEM("Ip6FragCreates", IPSTATS_MIB_FRAGCREATES), SNMP_MIB_ITEM("Ip6InMcastPkts", IPSTATS_MIB_INMCASTPKTS), SNMP_MIB_ITEM("Ip6OutMcastPkts", IPSTATS_MIB_OUTMCASTPKTS), SNMP_MIB_ITEM("Ip6InOctets", IPSTATS_MIB_INOCTETS), SNMP_MIB_ITEM("Ip6OutOctets", IPSTATS_MIB_OUTOCTETS), SNMP_MIB_ITEM("Ip6InMcastOctets", IPSTATS_MIB_INMCASTOCTETS), SNMP_MIB_ITEM("Ip6OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS), SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS), SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS), /* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */ SNMP_MIB_ITEM("Ip6InNoECTPkts", IPSTATS_MIB_NOECTPKTS), SNMP_MIB_ITEM("Ip6InECT1Pkts", IPSTATS_MIB_ECT1PKTS), SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS), SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS), SNMP_MIB_ITEM("Ip6OutTransmits", IPSTATS_MIB_OUTPKTS), SNMP_MIB_SENTINEL }; static const struct snmp_mib snmp6_icmp6_list[] = { /* icmpv6 mib according to RFC 2466 */ SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS), SNMP_MIB_ITEM("Icmp6InCsumErrors", ICMP6_MIB_CSUMERRORS), SNMP_MIB_ITEM("Icmp6OutRateLimitHost", ICMP6_MIB_RATELIMITHOST), SNMP_MIB_SENTINEL }; /* RFC 4293 v6 ICMPMsgStatsTable; named items for RFC 2466 compatibility */ static const char *const icmp6type2name[256] = { [ICMPV6_DEST_UNREACH] = "DestUnreachs", [ICMPV6_PKT_TOOBIG] = "PktTooBigs", [ICMPV6_TIME_EXCEED] = "TimeExcds", [ICMPV6_PARAMPROB] = "ParmProblems", [ICMPV6_ECHO_REQUEST] = "Echos", [ICMPV6_ECHO_REPLY] = "EchoReplies", [ICMPV6_MGM_QUERY] = "GroupMembQueries", [ICMPV6_MGM_REPORT] = "GroupMembResponses", [ICMPV6_MGM_REDUCTION] = "GroupMembReductions", [ICMPV6_MLD2_REPORT] = "MLDv2Reports", [NDISC_ROUTER_ADVERTISEMENT] = "RouterAdvertisements", [NDISC_ROUTER_SOLICITATION] = "RouterSolicits", [NDISC_NEIGHBOUR_ADVERTISEMENT] = "NeighborAdvertisements", [NDISC_NEIGHBOUR_SOLICITATION] = "NeighborSolicits", [NDISC_REDIRECT] = "Redirects", }; static const struct snmp_mib snmp6_udp6_list[] = { SNMP_MIB_ITEM("Udp6InDatagrams", UDP_MIB_INDATAGRAMS), SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS), SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS), SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS), SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI), SNMP_MIB_ITEM("Udp6MemErrors", UDP_MIB_MEMERRORS), SNMP_MIB_SENTINEL }; static const struct snmp_mib snmp6_udplite6_list[] = { SNMP_MIB_ITEM("UdpLite6InDatagrams", UDP_MIB_INDATAGRAMS), SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS), SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS), 
SNMP_MIB_ITEM("UdpLite6InCsumErrors", UDP_MIB_CSUMERRORS), SNMP_MIB_ITEM("UdpLite6MemErrors", UDP_MIB_MEMERRORS), SNMP_MIB_SENTINEL }; static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, atomic_long_t *smib) { char name[32]; int i; /* print by name -- deprecated items */ for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { int icmptype; const char *p; icmptype = i & 0xff; p = icmp6type2name[icmptype]; if (!p) /* don't print un-named types here */ continue; snprintf(name, sizeof(name), "Icmp6%s%s", i & 0x100 ? "Out" : "In", p); seq_printf(seq, "%-32s\t%lu\n", name, atomic_long_read(smib + i)); } /* print by number (nonzero only) - ICMPMsgStat format */ for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { unsigned long val; val = atomic_long_read(smib + i); if (!val) continue; snprintf(name, sizeof(name), "Icmp6%sType%u", i & 0x100 ? "Out" : "In", i & 0xff); seq_printf(seq, "%-32s\t%lu\n", name, val); } } /* can be called either with percpu mib (pcpumib != NULL), * or shared one (smib != NULL) */ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu *pcpumib, atomic_long_t *smib, const struct snmp_mib *itemlist) { unsigned long buff[SNMP_MIB_MAX]; int i; if (pcpumib) { memset(buff, 0, sizeof(unsigned long) * SNMP_MIB_MAX); snmp_get_cpu_field_batch(buff, itemlist, pcpumib); for (i = 0; itemlist[i].name; i++) seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, buff[i]); } else { for (i = 0; itemlist[i].name; i++) seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, atomic_long_read(smib + itemlist[i].entry)); } } static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib, const struct snmp_mib *itemlist, size_t syncpoff) { u64 buff64[SNMP_MIB_MAX]; int i; memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX); snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff); for (i = 0; itemlist[i].name; i++) seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name, buff64[i]); } static int snmp6_seq_show(struct seq_file *seq, void *v) { struct net *net = (struct net *)seq->private; snmp6_seq_show_item64(seq, net->mib.ipv6_statistics, snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); snmp6_seq_show_item(seq, net->mib.icmpv6_statistics, NULL, snmp6_icmp6_list); snmp6_seq_show_icmpv6msg(seq, net->mib.icmpv6msg_statistics->mibs); snmp6_seq_show_item(seq, net->mib.udp_stats_in6, NULL, snmp6_udp6_list); snmp6_seq_show_item(seq, net->mib.udplite_stats_in6, NULL, snmp6_udplite6_list); return 0; } static int snmp6_dev_seq_show(struct seq_file *seq, void *v) { struct inet6_dev *idev = (struct inet6_dev *)seq->private; seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); snmp6_seq_show_item64(seq, idev->stats.ipv6, snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs, snmp6_icmp6_list); snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs); return 0; } int snmp6_register_dev(struct inet6_dev *idev) { struct proc_dir_entry *p; struct net *net; if (!idev || !idev->dev) return -EINVAL; net = dev_net(idev->dev); if (!net->mib.proc_net_devsnmp6) return -ENOENT; p = proc_create_single_data(idev->dev->name, 0444, net->mib.proc_net_devsnmp6, snmp6_dev_seq_show, idev); if (!p) return -ENOMEM; idev->stats.proc_dir_entry = p; return 0; } int snmp6_unregister_dev(struct inet6_dev *idev) { struct net *net = dev_net(idev->dev); if (!net->mib.proc_net_devsnmp6) return -ENOENT; if (!idev->stats.proc_dir_entry) return -EINVAL; proc_remove(idev->stats.proc_dir_entry); idev->stats.proc_dir_entry = NULL; return 0; } static int __net_init 
ipv6_proc_init_net(struct net *net) { if (!proc_create_net_single("sockstat6", 0444, net->proc_net, sockstat6_seq_show, NULL)) return -ENOMEM; if (!proc_create_net_single("snmp6", 0444, net->proc_net, snmp6_seq_show, NULL)) goto proc_snmp6_fail; net->mib.proc_net_devsnmp6 = proc_mkdir("dev_snmp6", net->proc_net); if (!net->mib.proc_net_devsnmp6) goto proc_dev_snmp6_fail; return 0; proc_dev_snmp6_fail: remove_proc_entry("snmp6", net->proc_net); proc_snmp6_fail: remove_proc_entry("sockstat6", net->proc_net); return -ENOMEM; } static void __net_exit ipv6_proc_exit_net(struct net *net) { remove_proc_entry("sockstat6", net->proc_net); remove_proc_entry("dev_snmp6", net->proc_net); remove_proc_entry("snmp6", net->proc_net); } static struct pernet_operations ipv6_proc_ops = { .init = ipv6_proc_init_net, .exit = ipv6_proc_exit_net, }; int __init ipv6_misc_proc_init(void) { return register_pernet_subsys(&ipv6_proc_ops); } void ipv6_misc_proc_exit(void) { unregister_pernet_subsys(&ipv6_proc_ops); } |
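/*
 * Illustrative userspace sketch (not part of the kernel source above): it
 * parses /proc/net/snmp6, which snmp6_seq_show() emits as one name/value pair
 * per line using the "%-32s\t%lu" format shown in snmp6_seq_show_item(). The
 * counter picked out here, Ip6InReceives, is just an example taken from
 * snmp6_ipstats_list.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[64];
	unsigned long value;
	FILE *f = fopen("/proc/net/snmp6", "r");

	if (!f) {
		perror("/proc/net/snmp6");
		return 1;
	}
	/* Each line is "<counter name><whitespace><decimal value>". */
	while (fscanf(f, "%63s %lu", name, &value) == 2) {
		if (strcmp(name, "Ip6InReceives") == 0)
			printf("%s = %lu\n", name, value);
	}
	fclose(f);
	return 0;
}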
| 3678 3635 1686 34 1526 1043 30 2746 1532 27 14 1525 222 4043 435 4043 74 3989 233 3946 283 38 160 50 89 86 89 89 88 76 16 89 89 996 31 1176 1177 30 30 30 30 953 50 63 16 964 963 226 984 111 1803 823 1417 31 116 166 44 1 148 2 163 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_FS_NOTIFY_H #define _LINUX_FS_NOTIFY_H /* * include/linux/fsnotify.h - generic hooks for filesystem notification, to * reduce in-source duplication from both dnotify and inotify. * * We don't compile any of this away in some complicated menagerie of ifdefs. * Instead, we rely on the code inside to optimize away as needed. * * (C) Copyright 2005 Robert Love */ #include <linux/fsnotify_backend.h> #include <linux/audit.h> #include <linux/slab.h> #include <linux/bug.h> /* Are there any inode/mount/sb objects watched with priority prio or above? */ static inline bool fsnotify_sb_has_priority_watchers(struct super_block *sb, int prio) { struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); /* Were any marks ever added to any object on this sb? */ if (!sbinfo) return false; return atomic_long_read(&sbinfo->watched_objects[prio]); } /* Are there any inode/mount/sb objects that are being watched at all? */ static inline bool fsnotify_sb_has_watchers(struct super_block *sb) { return fsnotify_sb_has_priority_watchers(sb, 0); } /* * Notify this @dir inode about a change in a child directory entry. * The directory entry may have turned positive or negative or its inode may * have changed (i.e. renamed over). 
* * Unlike fsnotify_parent(), the event will be reported regardless of the * FS_EVENT_ON_CHILD mask on the parent inode and will not be reported if only * the child is interested and not the parent. */ static inline int fsnotify_name(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, u32 cookie) { if (!fsnotify_sb_has_watchers(dir->i_sb)) return 0; return fsnotify(mask, data, data_type, dir, name, NULL, cookie); } static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, __u32 mask) { fsnotify_name(mask, dentry, FSNOTIFY_EVENT_DENTRY, dir, &dentry->d_name, 0); } static inline void fsnotify_inode(struct inode *inode, __u32 mask) { if (!fsnotify_sb_has_watchers(inode->i_sb)) return; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0); } /* Notify this dentry's parent about a child's events. */ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { struct inode *inode = d_inode(dentry); if (!fsnotify_sb_has_watchers(inode->i_sb)) return 0; if (S_ISDIR(inode->i_mode)) { mask |= FS_ISDIR; /* sb/mount marks are not interested in name of directory */ if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) goto notify_child; } /* disconnected dentry cannot notify parent */ if (IS_ROOT(dentry)) goto notify_child; return __fsnotify_parent(dentry, mask, data, data_type); notify_child: return fsnotify(mask, data, data_type, NULL, NULL, inode, 0); } /* * Simple wrappers to consolidate calls to fsnotify_parent() when an event * is on a file/dentry. */ static inline void fsnotify_dentry(struct dentry *dentry, __u32 mask) { fsnotify_parent(dentry, mask, dentry, FSNOTIFY_EVENT_DENTRY); } static inline int fsnotify_path(const struct path *path, __u32 mask) { return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); } static inline int fsnotify_file(struct file *file, __u32 mask) { /* * FMODE_NONOTIFY are fds generated by fanotify itself which should not * generate new events. We also don't want to generate events for * FMODE_PATH fds (involves open & close events) as they are just * handle creation / destruction events and not "real" file events. */ if (FMODE_FSNOTIFY_NONE(file->f_mode)) return 0; return fsnotify_path(&file->f_path, mask); } #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS int fsnotify_open_perm_and_set_mode(struct file *file); /* * fsnotify_file_area_perm - permission hook before access to file range */ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, const loff_t *ppos, size_t count) { /* * filesystem may be modified in the context of permission events * (e.g. by HSM filling a file on access), so sb freeze protection * must not be held. */ lockdep_assert_once(file_write_not_started(file)); if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS))) return 0; /* * read()/write() and other types of access generate pre-content events. */ if (unlikely(FMODE_FSNOTIFY_HSM(file->f_mode))) { int ret = fsnotify_pre_content(&file->f_path, ppos, count); if (ret) return ret; } if (!(perm_mask & MAY_READ) || likely(!FMODE_FSNOTIFY_ACCESS_PERM(file->f_mode))) return 0; /* * read() also generates the legacy FS_ACCESS_PERM event, so content * scanners can inspect the content filled by pre-content event. 
*/ return fsnotify_path(&file->f_path, FS_ACCESS_PERM); } /* * fsnotify_mmap_perm - permission hook before mmap of file range */ static inline int fsnotify_mmap_perm(struct file *file, int prot, const loff_t off, size_t len) { /* * mmap() generates only pre-content events. */ if (!file || likely(!FMODE_FSNOTIFY_HSM(file->f_mode))) return 0; return fsnotify_pre_content(&file->f_path, &off, len); } /* * fsnotify_truncate_perm - permission hook before file truncate */ static inline int fsnotify_truncate_perm(const struct path *path, loff_t length) { struct inode *inode = d_inode(path->dentry); if (!(inode->i_sb->s_iflags & SB_I_ALLOW_HSM) || !fsnotify_sb_has_priority_watchers(inode->i_sb, FSNOTIFY_PRIO_PRE_CONTENT)) return 0; return fsnotify_pre_content(path, &length, 0); } /* * fsnotify_file_perm - permission hook before file access (unknown range) */ static inline int fsnotify_file_perm(struct file *file, int perm_mask) { return fsnotify_file_area_perm(file, perm_mask, NULL, 0); } #else static inline int fsnotify_open_perm_and_set_mode(struct file *file) { return 0; } static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, const loff_t *ppos, size_t count) { return 0; } static inline int fsnotify_mmap_perm(struct file *file, int prot, const loff_t off, size_t len) { return 0; } static inline int fsnotify_truncate_perm(const struct path *path, loff_t length) { return 0; } static inline int fsnotify_file_perm(struct file *file, int perm_mask) { return 0; } #endif /* * fsnotify_link_count - inode's link count changed */ static inline void fsnotify_link_count(struct inode *inode) { fsnotify_inode(inode, FS_ATTRIB); } /* * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir */ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, const struct qstr *old_name, int isdir, struct inode *target, struct dentry *moved) { struct inode *source = moved->d_inode; u32 fs_cookie = fsnotify_get_cookie(); __u32 old_dir_mask = FS_MOVED_FROM; __u32 new_dir_mask = FS_MOVED_TO; __u32 rename_mask = FS_RENAME; const struct qstr *new_name = &moved->d_name; if (isdir) { old_dir_mask |= FS_ISDIR; new_dir_mask |= FS_ISDIR; rename_mask |= FS_ISDIR; } /* Event with information about both old and new parent+name */ fsnotify_name(rename_mask, moved, FSNOTIFY_EVENT_DENTRY, old_dir, old_name, 0); fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_dir, old_name, fs_cookie); fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_dir, new_name, fs_cookie); if (target) fsnotify_link_count(target); fsnotify_inode(source, FS_MOVE_SELF); audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); } /* * fsnotify_inode_delete - and inode is being evicted from cache, clean up is needed */ static inline void fsnotify_inode_delete(struct inode *inode) { __fsnotify_inode_delete(inode); } /* * fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed */ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) { __fsnotify_vfsmount_delete(mnt); } static inline void fsnotify_mntns_delete(struct mnt_namespace *mntns) { __fsnotify_mntns_delete(mntns); } /* * fsnotify_inoderemove - an inode is going away */ static inline void fsnotify_inoderemove(struct inode *inode) { fsnotify_inode(inode, FS_DELETE_SELF); __fsnotify_inode_delete(inode); } /* * fsnotify_create - 'name' was linked in * * Caller must make sure that dentry->d_name is stable. * Note: some filesystems (e.g. 
kernfs) leave @dentry negative and instantiate * ->d_inode later */ static inline void fsnotify_create(struct inode *dir, struct dentry *dentry) { audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE); fsnotify_dirent(dir, dentry, FS_CREATE); } /* * fsnotify_link - new hardlink in 'inode' directory * * Caller must make sure that new_dentry->d_name is stable. * Note: We have to pass also the linked inode ptr as some filesystems leave * new_dentry->d_inode NULL and instantiate inode pointer later */ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) { fsnotify_link_count(inode); audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE); fsnotify_name(FS_CREATE, inode, FSNOTIFY_EVENT_INODE, dir, &new_dentry->d_name, 0); } /* * fsnotify_delete - @dentry was unlinked and unhashed * * Caller must make sure that dentry->d_name is stable. * * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode * as this may be called after d_delete() and old_dentry may be negative. */ static inline void fsnotify_delete(struct inode *dir, struct inode *inode, struct dentry *dentry) { __u32 mask = FS_DELETE; if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name, 0); } /** * d_delete_notify - delete a dentry and call fsnotify_delete() * @dentry: The dentry to delete * * This helper is used to guaranty that the unlinked inode cannot be found * by lookup of this name after fsnotify_delete() event has been delivered. */ static inline void d_delete_notify(struct inode *dir, struct dentry *dentry) { struct inode *inode = d_inode(dentry); ihold(inode); d_delete(dentry); fsnotify_delete(dir, inode, dentry); iput(inode); } /* * fsnotify_unlink - 'name' was unlinked * * Caller must make sure that dentry->d_name is stable. */ static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry) { if (WARN_ON_ONCE(d_is_negative(dentry))) return; fsnotify_delete(dir, d_inode(dentry), dentry); } /* * fsnotify_mkdir - directory 'name' was created * * Caller must make sure that dentry->d_name is stable. * Note: some filesystems (e.g. kernfs) leave @dentry negative and instantiate * ->d_inode later */ static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry) { audit_inode_child(dir, dentry, AUDIT_TYPE_CHILD_CREATE); fsnotify_dirent(dir, dentry, FS_CREATE | FS_ISDIR); } /* * fsnotify_rmdir - directory 'name' was removed * * Caller must make sure that dentry->d_name is stable. */ static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry) { if (WARN_ON_ONCE(d_is_negative(dentry))) return; fsnotify_delete(dir, d_inode(dentry), dentry); } /* * fsnotify_access - file was read */ static inline void fsnotify_access(struct file *file) { fsnotify_file(file, FS_ACCESS); } /* * fsnotify_modify - file was modified */ static inline void fsnotify_modify(struct file *file) { fsnotify_file(file, FS_MODIFY); } /* * fsnotify_open - file was opened */ static inline void fsnotify_open(struct file *file) { __u32 mask = FS_OPEN; if (file->f_flags & __FMODE_EXEC) mask |= FS_OPEN_EXEC; fsnotify_file(file, mask); } /* * fsnotify_close - file was closed */ static inline void fsnotify_close(struct file *file) { __u32 mask = (file->f_mode & FMODE_WRITE) ? 
FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; fsnotify_file(file, mask); } /* * fsnotify_xattr - extended attributes were changed */ static inline void fsnotify_xattr(struct dentry *dentry) { fsnotify_dentry(dentry, FS_ATTRIB); } /* * fsnotify_change - notify_change event. file was modified and/or metadata * was changed. */ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) { __u32 mask = 0; if (ia_valid & ATTR_UID) mask |= FS_ATTRIB; if (ia_valid & ATTR_GID) mask |= FS_ATTRIB; if (ia_valid & ATTR_SIZE) mask |= FS_MODIFY; /* both times implies a utime(s) call */ if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) mask |= FS_ATTRIB; else if (ia_valid & ATTR_ATIME) mask |= FS_ACCESS; else if (ia_valid & ATTR_MTIME) mask |= FS_MODIFY; if (ia_valid & ATTR_MODE) mask |= FS_ATTRIB; if (mask) fsnotify_dentry(dentry, mask); } static inline int fsnotify_sb_error(struct super_block *sb, struct inode *inode, int error) { struct fs_error_report report = { .error = error, .inode = inode, .sb = sb, }; return fsnotify(FS_ERROR, &report, FSNOTIFY_EVENT_ERROR, NULL, NULL, NULL, 0); } static inline void fsnotify_mnt_attach(struct mnt_namespace *ns, struct vfsmount *mnt) { fsnotify_mnt(FS_MNT_ATTACH, ns, mnt); } static inline void fsnotify_mnt_detach(struct mnt_namespace *ns, struct vfsmount *mnt) { fsnotify_mnt(FS_MNT_DETACH, ns, mnt); } static inline void fsnotify_mnt_move(struct mnt_namespace *ns, struct vfsmount *mnt) { fsnotify_mnt(FS_MNT_MOVE, ns, mnt); } #endif /* _LINUX_FS_NOTIFY_H */ |
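/*
 * Illustrative userspace sketch (not part of this header): the hooks above
 * feed the fsnotify core, and the resulting events are what interfaces such
 * as inotify deliver to userspace. This minimal watcher uses the standard
 * inotify(7) API to report create, delete and move events for one directory,
 * roughly mirroring fsnotify_create(), fsnotify_delete() and fsnotify_move().
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/inotify.h>

int main(int argc, char **argv)
{
	char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
	ssize_t len;
	int fd, wd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}

	fd = inotify_init1(0);
	if (fd < 0) {
		perror("inotify_init1");
		return 1;
	}
	wd = inotify_add_watch(fd, argv[1],
			       IN_CREATE | IN_DELETE | IN_MOVED_FROM | IN_MOVED_TO);
	if (wd < 0) {
		perror("inotify_add_watch");
		return 1;
	}

	while ((len = read(fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		/* Events are variable length: header plus optional name. */
		while (p < buf + len) {
			struct inotify_event *ev = (struct inotify_event *)p;

			printf("mask=0x%08x name=%s\n", ev->mask,
			       ev->len ? ev->name : "");
			p += sizeof(*ev) + ev->len;
		}
	}
	close(fd);
	return 0;
}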
| 45 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_TTY_DRIVER_H #define _LINUX_TTY_DRIVER_H #include <linux/export.h> #include <linux/fs.h> #include <linux/kref.h> #include <linux/list.h> #include <linux/cdev.h> #include <linux/uaccess.h> #include <linux/termios.h> #include <linux/seq_file.h> struct tty_struct; struct tty_driver; struct serial_icounter_struct; struct serial_struct; /** * enum tty_driver_flag -- TTY Driver Flags * * These are flags passed to tty_alloc_driver(). * * @TTY_DRIVER_INSTALLED: * Whether this driver was succesfully installed. This is a tty internal * flag. Do not touch. * * @TTY_DRIVER_RESET_TERMIOS: * Requests the tty layer to reset the termios setting when the last * process has closed the device. Used for PTYs, in particular. * * @TTY_DRIVER_REAL_RAW: * Indicates that the driver will guarantee not to set any special * character handling flags if this is set for the tty: * * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)`` * * That is, if there is no reason for the driver to * send notifications of parity and break characters up to the line * driver, it won't do so. This allows the line driver to optimize for * this case if this flag is set. 
(Note that there is also a promise, if * the above case is true, not to signal overruns, either.) * * @TTY_DRIVER_DYNAMIC_DEV: * The individual tty devices need to be registered with a call to * tty_register_device() when the device is found in the system and * unregistered with a call to tty_unregister_device() so the devices will * show up properly in sysfs. If not set, all &tty_driver.num entries * will be created by the tty core in sysfs when tty_register_driver() is * called. This is to be used by drivers that have tty devices that can * appear and disappear while the main tty driver is registered with the * tty core. * * @TTY_DRIVER_DEVPTS_MEM: * Don't use the standard arrays (&tty_driver.ttys and * &tty_driver.termios), instead use dynamic memory keyed through the * devpts filesystem. This is only applicable to the PTY driver. * * @TTY_DRIVER_HARDWARE_BREAK: * Hardware handles break signals. Pass the requested timeout to the * &tty_operations.break_ctl instead of using a simple on/off interface. * * @TTY_DRIVER_DYNAMIC_ALLOC: * Do not allocate structures which are needed per line for this driver * (&tty_driver.ports) as it would waste memory. The driver will take * care of them. This is only applicable to the PTY driver. * * @TTY_DRIVER_UNNUMBERED_NODE: * Do not create numbered ``/dev`` nodes. For example, create * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a * driver for a single tty device is being allocated. */ enum tty_driver_flag { TTY_DRIVER_INSTALLED = BIT(0), TTY_DRIVER_RESET_TERMIOS = BIT(1), TTY_DRIVER_REAL_RAW = BIT(2), TTY_DRIVER_DYNAMIC_DEV = BIT(3), TTY_DRIVER_DEVPTS_MEM = BIT(4), TTY_DRIVER_HARDWARE_BREAK = BIT(5), TTY_DRIVER_DYNAMIC_ALLOC = BIT(6), TTY_DRIVER_UNNUMBERED_NODE = BIT(7), }; enum tty_driver_type { TTY_DRIVER_TYPE_SYSTEM, TTY_DRIVER_TYPE_CONSOLE, TTY_DRIVER_TYPE_SERIAL, TTY_DRIVER_TYPE_PTY, TTY_DRIVER_TYPE_SCC, TTY_DRIVER_TYPE_SYSCONS, }; enum tty_driver_subtype { SYSTEM_TYPE_TTY = 1, SYSTEM_TYPE_CONSOLE, SYSTEM_TYPE_SYSCONS, SYSTEM_TYPE_SYSPTMX, PTY_TYPE_MASTER = 1, PTY_TYPE_SLAVE, SERIAL_TYPE_NORMAL = 1, }; /** * struct tty_operations -- interface between driver and tty * * @lookup: ``struct tty_struct *()(struct tty_driver *self, struct file *, * int idx)`` * * Return the tty device corresponding to @idx, %NULL if there is not * one currently in use and an %ERR_PTR value on error. Called under * %tty_mutex (for now!) * * Optional method. Default behaviour is to use the @self->ttys array. * * @install: ``int ()(struct tty_driver *self, struct tty_struct *tty)`` * * Install a new @tty into the @self's internal tables. Used in * conjunction with @lookup and @remove methods. * * Optional method. Default behaviour is to use the @self->ttys array. * * @remove: ``void ()(struct tty_driver *self, struct tty_struct *tty)`` * * Remove a closed @tty from the @self's internal tables. Used in * conjunction with @lookup and @remove methods. * * Optional method. Default behaviour is to use the @self->ttys array. * * @open: ``int ()(struct tty_struct *tty, struct file *)`` * * This routine is called when a particular @tty device is opened. This * routine is mandatory; if this routine is not filled in, the attempted * open will fail with %ENODEV. * * Required method. Called with tty lock held. May sleep. * * @close: ``void ()(struct tty_struct *tty, struct file *)`` * * This routine is called when a particular @tty device is closed. At the * point of return from this call the driver must make no further ldisc * calls of any kind.
* * Remark: called even if the corresponding @open() failed. * * Required method. Called with tty lock held. May sleep. * * @shutdown: ``void ()(struct tty_struct *tty)`` * * This routine is called under the tty lock when a particular @tty device * is closed for the last time. It executes before the @tty resources * are freed so may execute while another function holds a @tty kref. * * @cleanup: ``void ()(struct tty_struct *tty)`` * * This routine is called asynchronously when a particular @tty device * is closed for the last time freeing up the resources. This is * actually the second part of shutdown for routines that might sleep. * * @write: ``ssize_t ()(struct tty_struct *tty, const u8 *buf, size_t count)`` * * This routine is called by the kernel to write a series (@count) of * characters (@buf) to the @tty device. The characters may come from * user space or kernel space. This routine will return the * number of characters actually accepted for writing. * * May occur in parallel in special cases. Because this includes panic * paths drivers generally shouldn't try and do clever locking here. * * Optional: Required for writable devices. May not sleep. * * @put_char: ``int ()(struct tty_struct *tty, u8 ch)`` * * This routine is called by the kernel to write a single character @ch to * the @tty device. If the kernel uses this routine, it must call the * @flush_chars() routine (if defined) when it is done stuffing characters * into the driver. If there is no room in the queue, the character is * ignored. * * Optional: Kernel will use the @write method if not provided. Do not * call this function directly, call tty_put_char(). * * @flush_chars: ``void ()(struct tty_struct *tty)`` * * This routine is called by the kernel after it has written a * series of characters to the tty device using @put_char(). * * Optional. Do not call this function directly, call * tty_driver_flush_chars(). * * @write_room: ``unsigned int ()(struct tty_struct *tty)`` * * This routine returns the numbers of characters the @tty driver * will accept for queuing to be written. This number is subject * to change as output buffers get emptied, or if the output flow * control is acted. * * The ldisc is responsible for being intelligent about multi-threading of * write_room/write calls * * Required if @write method is provided else not needed. Do not call this * function directly, call tty_write_room() * * @chars_in_buffer: ``unsigned int ()(struct tty_struct *tty)`` * * This routine returns the number of characters in the device private * output queue. Used in tty_wait_until_sent() and for poll() * implementation. * * Optional: if not provided, it is assumed there is no queue on the * device. Do not call this function directly, call tty_chars_in_buffer(). * * @ioctl: ``int ()(struct tty_struct *tty, unsigned int cmd, * unsigned long arg)`` * * This routine allows the @tty driver to implement device-specific * ioctls. If the ioctl number passed in @cmd is not recognized by the * driver, it should return %ENOIOCTLCMD. * * Optional. * * @compat_ioctl: ``long ()(struct tty_struct *tty, unsigned int cmd, * unsigned long arg)`` * * Implement ioctl processing for 32 bit process on 64 bit system. * * Optional. * * @set_termios: ``void ()(struct tty_struct *tty, const struct ktermios *old)`` * * This routine allows the @tty driver to be notified when device's * termios settings have changed. New settings are in @tty->termios. * Previous settings are passed in the @old argument. 
* * The API is defined such that the driver should return the actual modes * selected. This means that the driver is responsible for modifying any * bits in @tty->termios it cannot fulfill to indicate the actual modes * being used. * * Optional. Called under the @tty->termios_rwsem. May sleep. * * @ldisc_ok: ``int ()(struct tty_struct *tty, int ldisc)`` * * This routine allows the @tty driver to decide if it can deal * with a particular @ldisc. * * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem. * * @set_ldisc: ``void ()(struct tty_struct *tty)`` * * This routine allows the @tty driver to be notified when the device's * line discipline is being changed. At the point this is done the * discipline is not yet usable. * * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem. * * @throttle: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that input buffers for the line * discipline are close to full, and it should somehow signal that no more * characters should be sent to the @tty. * * Serialization including with @unthrottle() is the job of the ldisc * layer. * * Optional: Always invoke via tty_throttle_safe(). Called under the * @tty->termios_rwsem. * * @unthrottle: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that it should signal that * characters can now be sent to the @tty without fear of overrunning the * input buffers of the line disciplines. * * Optional. Always invoke via tty_unthrottle(). Called under the * @tty->termios_rwsem. * * @stop: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that it should stop outputting * characters to the tty device. * * Called with @tty->flow.lock held. Serialized with @start() method. * * Optional. Always invoke via stop_tty(). * * @start: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that it resumed sending * characters to the @tty device. * * Called with @tty->flow.lock held. Serialized with stop() method. * * Optional. Always invoke via start_tty(). * * @hangup: ``void ()(struct tty_struct *tty)`` * * This routine notifies the @tty driver that it should hang up the @tty * device. * * Optional. Called with tty lock held. * * @break_ctl: ``int ()(struct tty_struct *tty, int state)`` * * This optional routine requests the @tty driver to turn on or off BREAK * status on the RS-232 port. If @state is -1, then the BREAK status * should be turned on; if @state is 0, then BREAK should be turned off. * * If this routine is implemented, the high-level tty driver will handle * the following ioctls: %TCSBRK, %TCSBRKP, %TIOCSBRK, %TIOCCBRK. * * If the driver sets %TTY_DRIVER_HARDWARE_BREAK in tty_alloc_driver(), * then the interface will also be called with actual times and the * hardware is expected to do the delay work itself. 0 and -1 are still * used for on/off. * * Optional: Required for %TCSBRK/%BRKP/etc. handling. May sleep. * * @flush_buffer: ``void ()(struct tty_struct *tty)`` * * This routine discards device private output buffer. Invoked on close, * hangup, to implement %TCOFLUSH ioctl and similar. * * Optional: if not provided, it is assumed there is no queue on the * device. Do not call this function directly, call * tty_driver_flush_buffer(). * * @wait_until_sent: ``void ()(struct tty_struct *tty, int timeout)`` * * This routine waits until the device has written out all of the * characters in its transmitter FIFO. Or until @timeout (in jiffies) is * reached. 
* * Optional: If not provided, the device is assumed to have no FIFO. * Usually correct to invoke via tty_wait_until_sent(). May sleep. * * @send_xchar: ``void ()(struct tty_struct *tty, u8 ch)`` * * This routine is used to send a high-priority XON/XOFF character (@ch) * to the @tty device. * * Optional: If not provided, then the @write method is called under * the @tty->atomic_write_lock to keep it serialized with the ldisc. * * @tiocmget: ``int ()(struct tty_struct *tty)`` * * This routine is used to obtain the modem status bits from the @tty * driver. * * Optional: If not provided, then %ENOTTY is returned from the %TIOCMGET * ioctl. Do not call this function directly, call tty_tiocmget(). * * @tiocmset: ``int ()(struct tty_struct *tty, * unsigned int set, unsigned int clear)`` * * This routine is used to set the modem status bits to the @tty driver. * First, @clear bits should be cleared, then @set bits set. * * Optional: If not provided, then %ENOTTY is returned from the %TIOCMSET * ioctl. Do not call this function directly, call tty_tiocmset(). * * @resize: ``int ()(struct tty_struct *tty, struct winsize *ws)`` * * Called when a termios request is issued which changes the requested * terminal geometry to @ws. * * Optional: the default action is to update the termios structure * without error. This is usually the correct behaviour. Drivers should * not force errors here if they are not resizable objects (e.g. a serial * line). See tty_do_resize() if you need to wrap the standard method * in your own logic -- the usual case. * * @get_icount: ``int ()(struct tty_struct *tty, * struct serial_icounter *icount)`` * * Called when the @tty device receives a %TIOCGICOUNT ioctl. Passed a * kernel structure @icount to complete. * * Optional: called only if provided, otherwise %ENOTTY will be returned. * * @get_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)`` * * Called when the @tty device receives a %TIOCGSERIAL ioctl. Passed a * kernel structure @p (&struct serial_struct) to complete. * * Optional: called only if provided, otherwise %ENOTTY will be returned. * Do not call this function directly, call tty_tiocgserial(). * * @set_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)`` * * Called when the @tty device receives a %TIOCSSERIAL ioctl. Passed a * kernel structure @p (&struct serial_struct) to set the values from. * * Optional: called only if provided, otherwise %ENOTTY will be returned. * Do not call this function directly, call tty_tiocsserial(). * * @show_fdinfo: ``void ()(struct tty_struct *tty, struct seq_file *m)`` * * Called when the @tty device file descriptor receives a fdinfo request * from VFS (to show in /proc/<pid>/fdinfo/). @m should be filled with * information. * * Optional: called only if provided, otherwise nothing is written to @m. * Do not call this function directly, call tty_show_fdinfo(). * * @poll_init: ``int ()(struct tty_driver *driver, int line, char *options)`` * * kgdboc support (Documentation/process/debugging/kgdb.rst). This routine is * called to initialize the HW for later use by calling @poll_get_char or * @poll_put_char. * * Optional: called only if provided, otherwise skipped as a non-polling * driver. * * @poll_get_char: ``int ()(struct tty_driver *driver, int line)`` * * kgdboc support (see @poll_init). @driver should read a character from a * tty identified by @line and return it. * * Optional: called only if @poll_init provided. 
* * @poll_put_char: ``void ()(struct tty_driver *driver, int line, char ch)`` * * kgdboc support (see @poll_init). @driver should write character @ch to * a tty identified by @line. * * Optional: called only if @poll_init provided. * * @proc_show: ``int ()(struct seq_file *m, void *driver)`` * * Driver @driver (cast to &struct tty_driver) can show additional info in * /proc/tty/driver/<driver_name>. It is enough to fill in the information * into @m. * * Optional: called only if provided, otherwise no /proc entry created. * * This structure defines the interface between the low-level tty driver and * the tty routines. These routines can be defined. Unless noted otherwise, * they are optional, and can be filled in with a %NULL pointer. */ struct tty_operations { struct tty_struct * (*lookup)(struct tty_driver *driver, struct file *filp, int idx); int (*install)(struct tty_driver *driver, struct tty_struct *tty); void (*remove)(struct tty_driver *driver, struct tty_struct *tty); int (*open)(struct tty_struct * tty, struct file * filp); void (*close)(struct tty_struct * tty, struct file * filp); void (*shutdown)(struct tty_struct *tty); void (*cleanup)(struct tty_struct *tty); ssize_t (*write)(struct tty_struct *tty, const u8 *buf, size_t count); int (*put_char)(struct tty_struct *tty, u8 ch); void (*flush_chars)(struct tty_struct *tty); unsigned int (*write_room)(struct tty_struct *tty); unsigned int (*chars_in_buffer)(struct tty_struct *tty); int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); long (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg); void (*set_termios)(struct tty_struct *tty, const struct ktermios *old); void (*throttle)(struct tty_struct * tty); void (*unthrottle)(struct tty_struct * tty); void (*stop)(struct tty_struct *tty); void (*start)(struct tty_struct *tty); void (*hangup)(struct tty_struct *tty); int (*break_ctl)(struct tty_struct *tty, int state); void (*flush_buffer)(struct tty_struct *tty); int (*ldisc_ok)(struct tty_struct *tty, int ldisc); void (*set_ldisc)(struct tty_struct *tty); void (*wait_until_sent)(struct tty_struct *tty, int timeout); void (*send_xchar)(struct tty_struct *tty, u8 ch); int (*tiocmget)(struct tty_struct *tty); int (*tiocmset)(struct tty_struct *tty, unsigned int set, unsigned int clear); int (*resize)(struct tty_struct *tty, struct winsize *ws); int (*get_icount)(struct tty_struct *tty, struct serial_icounter_struct *icount); int (*get_serial)(struct tty_struct *tty, struct serial_struct *p); int (*set_serial)(struct tty_struct *tty, struct serial_struct *p); void (*show_fdinfo)(struct tty_struct *tty, struct seq_file *m); #ifdef CONFIG_CONSOLE_POLL int (*poll_init)(struct tty_driver *driver, int line, char *options); int (*poll_get_char)(struct tty_driver *driver, int line); void (*poll_put_char)(struct tty_driver *driver, int line, char ch); #endif int (*proc_show)(struct seq_file *m, void *driver); } __randomize_layout; /** * struct tty_driver -- driver for TTY devices * * @kref: reference counting. Reaching zero frees all the internals and the * driver. * @cdevs: allocated/registered character /dev devices * @owner: modules owning this driver. Used drivers cannot be rmmod'ed. * Automatically set by tty_alloc_driver(). 
* @driver_name: name of the driver used in /proc/tty * @name: used for constructing /dev node name * @name_base: used as a number base for constructing /dev node name * @major: major /dev device number (zero for autoassignment) * @minor_start: the first minor /dev device number * @num: number of devices allocated * @type: type of tty driver (enum tty_driver_type) * @subtype: subtype of tty driver (enum tty_driver_subtype) * @init_termios: termios to set to each tty initially (e.g. %tty_std_termios) * @flags: tty driver flags (%TTY_DRIVER_) * @proc_entry: proc fs entry, used internally * @other: driver of the linked tty; only used for the PTY driver * @ttys: array of active &struct tty_struct, set by tty_standard_install() * @ports: array of &struct tty_port; can be set during initialization by * tty_port_link_device() and similar * @termios: storage for termios at each TTY close for the next open * @driver_state: pointer to driver's arbitrary data * @ops: driver hooks for TTYs. Set them using tty_set_operations(). Use &struct * tty_port helpers in them as much as possible. * @tty_drivers: used internally to link tty_drivers together * * The usual handling of &struct tty_driver is to allocate it by * tty_alloc_driver(), set up all the necessary members, and register it by * tty_register_driver(). At last, the driver is torn down by calling * tty_unregister_driver() followed by tty_driver_kref_put(). * * The fields required to be set before calling tty_register_driver() include * @driver_name, @name, @type, @subtype, @init_termios, and @ops. */ struct tty_driver { struct kref kref; struct cdev **cdevs; struct module *owner; const char *driver_name; const char *name; int name_base; int major; int minor_start; unsigned int num; enum tty_driver_type type; enum tty_driver_subtype subtype; struct ktermios init_termios; unsigned long flags; struct proc_dir_entry *proc_entry; struct tty_driver *other; /* * Pointer to the tty data structures */ struct tty_struct **ttys; struct tty_port **ports; struct ktermios **termios; void *driver_state; /* * Driver methods */ const struct tty_operations *ops; struct list_head tty_drivers; } __randomize_layout; extern struct list_head tty_drivers; struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, unsigned long flags); struct tty_driver *tty_find_polling_driver(char *name, int *line); void tty_driver_kref_put(struct tty_driver *driver); /** * tty_alloc_driver - allocate tty driver * @lines: count of lines this driver can handle at most * @flags: some of enum tty_driver_flag, will be set in driver->flags * * Returns: struct tty_driver or a PTR-encoded error (use IS_ERR() and friends). 
*/ #define tty_alloc_driver(lines, flags) \ __tty_alloc_driver(lines, THIS_MODULE, flags) static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; } static inline void tty_set_operations(struct tty_driver *driver, const struct tty_operations *op) { driver->ops = op; } int tty_register_driver(struct tty_driver *driver); void tty_unregister_driver(struct tty_driver *driver); struct device *tty_register_device(struct tty_driver *driver, unsigned index, struct device *dev); struct device *tty_register_device_attr(struct tty_driver *driver, unsigned index, struct device *device, void *drvdata, const struct attribute_group **attr_grp); void tty_unregister_device(struct tty_driver *driver, unsigned index); #ifdef CONFIG_PROC_FS void proc_tty_register_driver(struct tty_driver *); void proc_tty_unregister_driver(struct tty_driver *); #else static inline void proc_tty_register_driver(struct tty_driver *d) {} static inline void proc_tty_unregister_driver(struct tty_driver *d) {} #endif #endif /* #ifdef _LINUX_TTY_DRIVER_H */
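/*
 * Illustrative sketch, not part of <linux/tty_driver.h>: the "usual handling"
 * described in the struct tty_driver kernel-doc above -- allocate with
 * tty_alloc_driver(), fill in the mandatory fields (driver_name, name, type,
 * subtype, init_termios, ops), register with tty_register_driver(), and tear
 * down with tty_unregister_driver() + tty_driver_kref_put(). The "example"
 * driver, its trivial ops and the single line are assumptions for
 * illustration only (a real driver would also wire up a &struct tty_port);
 * <linux/module.h>, <linux/tty.h> and <linux/err.h> are assumed included.
 */
static int example_open(struct tty_struct *tty, struct file *filp)
{
	return 0;
}

static void example_close(struct tty_struct *tty, struct file *filp)
{
}

static ssize_t example_write(struct tty_struct *tty, const u8 *buf, size_t count)
{
	return count;	/* pretend all characters were accepted */
}

static const struct tty_operations example_tty_ops = {
	.open	= example_open,
	.close	= example_close,
	.write	= example_write,
};

static struct tty_driver *example_driver;

static int __init example_tty_init(void)
{
	struct tty_driver *drv;
	int ret;

	drv = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW);
	if (IS_ERR(drv))
		return PTR_ERR(drv);

	drv->driver_name = "example";
	drv->name = "ttyEX";		/* node becomes /dev/ttyEX0 */
	drv->type = TTY_DRIVER_TYPE_SERIAL;
	drv->subtype = SERIAL_TYPE_NORMAL;
	drv->init_termios = tty_std_termios;
	tty_set_operations(drv, &example_tty_ops);

	ret = tty_register_driver(drv);
	if (ret) {
		tty_driver_kref_put(drv);
		return ret;
	}
	example_driver = drv;
	return 0;
}

static void __exit example_tty_exit(void)
{
	tty_unregister_driver(example_driver);
	tty_driver_kref_put(example_driver);
}

module_init(example_tty_init);
module_exit(example_tty_exit);
MODULE_LICENSE("GPL");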
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2017 Pablo Neira Ayuso <pablo@netfilter.org> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/list.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> struct nft_bitmap_elem { struct nft_elem_priv priv; struct list_head head; struct nft_set_ext ext; }; /* This bitmap uses two bits to represent one element. These two bits determine * the element state in the current and the future generation. * * An element can be in three states. The generation cursor is represented using * the ^ character, note that this cursor shifts on every successful transaction. * If no transaction is going on, we observe all elements are in the following * state: * * 11 = this element is active in the current generation. In case of no updates, * ^ it stays active in the next generation. * 00 = this element is inactive in the current generation. In case of no * ^ updates, it stays inactive in the next generation. * * On transaction handling, we observe these two temporary states: * * 01 = this element is inactive in the current generation and it becomes active * ^ in the next one. This happens when the element is inserted but the commit * path has not been executed yet, so activation is still pending. On * transaction abortion, the element is removed. * 10 = this element is active in the current generation and it becomes inactive * ^ in the next one. This happens when the element is deactivated but the commit * path has not been executed yet, so removal is still pending. On * transaction abortion, the next generation bit is reset to restore its * previous state. */ struct nft_bitmap { struct list_head list; u16 bitmap_size; u8 bitmap[]; }; static inline void nft_bitmap_location(const struct nft_set *set, const void *key, u32 *idx, u32 *off) { u32 k; if (set->klen == 2) k = *(u16 *)key; else k = *(u8 *)key; k <<= 1; *idx = k / BITS_PER_BYTE; *off = k % BITS_PER_BYTE; } /* Fetch the two bits that represent the element and check if it is active based * on the generation mask.
*/ static inline bool nft_bitmap_active(const u8 *bitmap, u32 idx, u32 off, u8 genmask) { return (bitmap[idx] & (0x3 << off)) & (genmask << off); } INDIRECT_CALLABLE_SCOPE const struct nft_set_ext * nft_bitmap_lookup(const struct net *net, const struct nft_set *set, const u32 *key) { const struct nft_bitmap *priv = nft_set_priv(set); static const struct nft_set_ext found; u8 genmask = nft_genmask_cur(net); u32 idx, off; nft_bitmap_location(set, key, &idx, &off); if (nft_bitmap_active(priv->bitmap, idx, off, genmask)) return &found; return NULL; } static struct nft_bitmap_elem * nft_bitmap_elem_find(const struct net *net, const struct nft_set *set, struct nft_bitmap_elem *this, u8 genmask) { const struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap_elem *be; list_for_each_entry_rcu(be, &priv->list, head, lockdep_is_held(&nft_pernet(net)->commit_mutex)) { if (memcmp(nft_set_ext_key(&be->ext), nft_set_ext_key(&this->ext), set->klen) || !nft_set_elem_active(&be->ext, genmask)) continue; return be; } return NULL; } static struct nft_elem_priv * nft_bitmap_get(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, unsigned int flags) { const struct nft_bitmap *priv = nft_set_priv(set); u8 genmask = nft_genmask_cur(net); struct nft_bitmap_elem *be; list_for_each_entry_rcu(be, &priv->list, head) { if (memcmp(nft_set_ext_key(&be->ext), elem->key.val.data, set->klen) || !nft_set_elem_active(&be->ext, genmask)) continue; return &be->priv; } return ERR_PTR(-ENOENT); } static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem, struct nft_elem_priv **elem_priv) { struct nft_bitmap_elem *new = nft_elem_priv_cast(elem->priv), *be; struct nft_bitmap *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); u32 idx, off; be = nft_bitmap_elem_find(net, set, new, genmask); if (be) { *elem_priv = &be->priv; return -EEXIST; } nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off); /* Enter 01 state. */ priv->bitmap[idx] |= (genmask << off); list_add_tail_rcu(&new->head, &priv->list); return 0; } static void nft_bitmap_remove(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_bitmap_elem *be = nft_elem_priv_cast(elem_priv); struct nft_bitmap *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); u32 idx, off; nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); /* Enter 00 state. */ priv->bitmap[idx] &= ~(genmask << off); list_del_rcu(&be->head); } static void nft_bitmap_activate(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_bitmap_elem *be = nft_elem_priv_cast(elem_priv); struct nft_bitmap *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); u32 idx, off; nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); /* Enter 11 state. */ priv->bitmap[idx] |= (genmask << off); nft_clear(net, &be->ext); } static void nft_bitmap_flush(const struct net *net, const struct nft_set *set, struct nft_elem_priv *elem_priv) { struct nft_bitmap_elem *be = nft_elem_priv_cast(elem_priv); struct nft_bitmap *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); u32 idx, off; nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); /* Enter 10 state, similar to deactivation. 
*/ priv->bitmap[idx] &= ~(genmask << off); nft_set_elem_change_active(net, set, &be->ext); } static struct nft_elem_priv * nft_bitmap_deactivate(const struct net *net, const struct nft_set *set, const struct nft_set_elem *elem) { struct nft_bitmap_elem *this = nft_elem_priv_cast(elem->priv), *be; struct nft_bitmap *priv = nft_set_priv(set); u8 genmask = nft_genmask_next(net); u32 idx, off; nft_bitmap_location(set, elem->key.val.data, &idx, &off); be = nft_bitmap_elem_find(net, set, this, genmask); if (!be) return NULL; /* Enter 10 state. */ priv->bitmap[idx] &= ~(genmask << off); nft_set_elem_change_active(net, set, &be->ext); return &be->priv; } static void nft_bitmap_walk(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter) { const struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap_elem *be; list_for_each_entry_rcu(be, &priv->list, head) { if (iter->count < iter->skip) goto cont; iter->err = iter->fn(ctx, set, iter, &be->priv); if (iter->err < 0) return; cont: iter->count++; } } /* The bitmap size is pow(2, key length in bits) / bits per byte. This is * multiplied by two since each element takes two bits. For 8 bit keys, the * bitmap consumes 66 bytes. For 16 bit keys, 16388 bytes. */ static inline u32 nft_bitmap_size(u32 klen) { return ((2 << ((klen * BITS_PER_BYTE) - 1)) / BITS_PER_BYTE) << 1; } static inline u64 nft_bitmap_total_size(u32 klen) { return sizeof(struct nft_bitmap) + nft_bitmap_size(klen); } static u64 nft_bitmap_privsize(const struct nlattr * const nla[], const struct nft_set_desc *desc) { u32 klen = ntohl(nla_get_be32(nla[NFTA_SET_KEY_LEN])); return nft_bitmap_total_size(klen); } static int nft_bitmap_init(const struct nft_set *set, const struct nft_set_desc *desc, const struct nlattr * const nla[]) { struct nft_bitmap *priv = nft_set_priv(set); BUILD_BUG_ON(offsetof(struct nft_bitmap_elem, priv) != 0); INIT_LIST_HEAD(&priv->list); priv->bitmap_size = nft_bitmap_size(set->klen); return 0; } static void nft_bitmap_destroy(const struct nft_ctx *ctx, const struct nft_set *set) { struct nft_bitmap *priv = nft_set_priv(set); struct nft_bitmap_elem *be, *n; list_for_each_entry_safe(be, n, &priv->list, head) nf_tables_set_elem_destroy(ctx, set, &be->priv); } static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features, struct nft_set_estimate *est) { /* Make sure we don't get bitmaps larger than 16 Kbytes. */ if (desc->klen > 2) return false; else if (desc->expr) return false; est->size = nft_bitmap_total_size(desc->klen); est->lookup = NFT_SET_CLASS_O_1; est->space = NFT_SET_CLASS_O_1; return true; } const struct nft_set_type nft_set_bitmap_type = { .ops = { .privsize = nft_bitmap_privsize, .elemsize = offsetof(struct nft_bitmap_elem, ext), .estimate = nft_bitmap_estimate, .init = nft_bitmap_init, .destroy = nft_bitmap_destroy, .insert = nft_bitmap_insert, .remove = nft_bitmap_remove, .deactivate = nft_bitmap_deactivate, .flush = nft_bitmap_flush, .activate = nft_bitmap_activate, .lookup = nft_bitmap_lookup, .walk = nft_bitmap_walk, .get = nft_bitmap_get, }, };
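/*
 * Illustrative sketch, not part of nft_set_bitmap.c: a stand-alone userspace
 * demonstration of the two-bits-per-element addressing implemented by
 * nft_bitmap_location() and nft_bitmap_active() above. The genmask values
 * (1 for the current generation, 2 for the next) mirror how
 * nft_genmask_cur()/nft_genmask_next() alternate around a commit; the
 * demo_* helpers and main() are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_BITS_PER_BYTE 8

static void demo_location(uint8_t key, unsigned int *idx, unsigned int *off)
{
	unsigned int k = (unsigned int)key << 1;	/* two bits per element */

	*idx = k / DEMO_BITS_PER_BYTE;
	*off = k % DEMO_BITS_PER_BYTE;
}

static int demo_active(const uint8_t *bitmap, unsigned int idx,
		       unsigned int off, uint8_t genmask)
{
	/* Same expression as nft_bitmap_active() above. */
	return (bitmap[idx] & (0x3 << off)) & (genmask << off);
}

int main(void)
{
	uint8_t bitmap[64] = { 0 };	/* 256 one-byte keys * 2 bits / 8 */
	uint8_t cur = 1, next = 2;	/* generation masks before a commit */
	unsigned int idx, off;

	demo_location(0x2a, &idx, &off);

	bitmap[idx] |= next << off;	/* insert: enter the 01 state */
	printf("key 0x2a -> byte %u, bit offset %u: active(cur)=%d active(next)=%d\n",
	       idx, off, !!demo_active(bitmap, idx, off, cur),
	       !!demo_active(bitmap, idx, off, next));

	bitmap[idx] |= cur << off;	/* commit/activate: enter the 11 state */
	printf("after activation: active(cur)=%d\n",
	       !!demo_active(bitmap, idx, off, cur));
	return 0;
}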
/* SPDX-License-Identifier: GPL-2.0-or-later */ #ifndef __SOUND_PCM_H #define __SOUND_PCM_H /* * Digital Audio (PCM) abstract layer * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Abramo
Bagnara <abramo@alsa-project.org> */ #include <sound/asound.h> #include <sound/memalloc.h> #include <sound/minors.h> #include <linux/poll.h> #include <linux/mm.h> #include <linux/bitops.h> #include <linux/pm_qos.h> #include <linux/refcount.h> #include <linux/uio.h> #define snd_pcm_substream_chip(substream) ((substream)->private_data) #define snd_pcm_chip(pcm) ((pcm)->private_data) #if IS_ENABLED(CONFIG_SND_PCM_OSS) #include <sound/pcm_oss.h> #endif /* * Hardware (lowlevel) section */ struct snd_pcm_hardware { unsigned int info; /* SNDRV_PCM_INFO_* */ u64 formats; /* SNDRV_PCM_FMTBIT_* */ u32 subformats; /* for S32_LE, SNDRV_PCM_SUBFMTBIT_* */ unsigned int rates; /* SNDRV_PCM_RATE_* */ unsigned int rate_min; /* min rate */ unsigned int rate_max; /* max rate */ unsigned int channels_min; /* min channels */ unsigned int channels_max; /* max channels */ size_t buffer_bytes_max; /* max buffer size */ size_t period_bytes_min; /* min period size */ size_t period_bytes_max; /* max period size */ unsigned int periods_min; /* min # of periods */ unsigned int periods_max; /* max # of periods */ size_t fifo_size; /* fifo size in bytes */ }; struct snd_pcm_status64; struct snd_pcm_substream; struct snd_pcm_audio_tstamp_config; /* definitions further down */ struct snd_pcm_audio_tstamp_report; struct snd_pcm_ops { int (*open)(struct snd_pcm_substream *substream); int (*close)(struct snd_pcm_substream *substream); int (*ioctl)(struct snd_pcm_substream * substream, unsigned int cmd, void *arg); int (*hw_params)(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); int (*hw_free)(struct snd_pcm_substream *substream); int (*prepare)(struct snd_pcm_substream *substream); int (*trigger)(struct snd_pcm_substream *substream, int cmd); int (*sync_stop)(struct snd_pcm_substream *substream); snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream); int (*get_time_info)(struct snd_pcm_substream *substream, struct timespec64 *system_ts, struct timespec64 *audio_ts, struct snd_pcm_audio_tstamp_config *audio_tstamp_config, struct snd_pcm_audio_tstamp_report *audio_tstamp_report); int (*fill_silence)(struct snd_pcm_substream *substream, int channel, unsigned long pos, unsigned long bytes); int (*copy)(struct snd_pcm_substream *substream, int channel, unsigned long pos, struct iov_iter *iter, unsigned long bytes); struct page *(*page)(struct snd_pcm_substream *substream, unsigned long offset); int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma); int (*ack)(struct snd_pcm_substream *substream); }; /* * */ #if defined(CONFIG_SND_DYNAMIC_MINORS) #define SNDRV_PCM_DEVICES (SNDRV_OS_MINORS-2) #else #define SNDRV_PCM_DEVICES 8 #endif #define SNDRV_PCM_IOCTL1_RESET 0 /* 1 is absent slot. */ #define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2 /* 3 is absent slot. 
*/ #define SNDRV_PCM_IOCTL1_FIFO_SIZE 4 #define SNDRV_PCM_IOCTL1_SYNC_ID 5 #define SNDRV_PCM_TRIGGER_STOP 0 #define SNDRV_PCM_TRIGGER_START 1 #define SNDRV_PCM_TRIGGER_PAUSE_PUSH 2 #define SNDRV_PCM_TRIGGER_PAUSE_RELEASE 3 #define SNDRV_PCM_TRIGGER_SUSPEND 4 #define SNDRV_PCM_TRIGGER_RESUME 5 #define SNDRV_PCM_TRIGGER_DRAIN 6 #define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1) /* If you change this don't forget to change rates[] table in pcm_native.c */ #define SNDRV_PCM_RATE_5512 (1U<<0) /* 5512Hz */ #define SNDRV_PCM_RATE_8000 (1U<<1) /* 8000Hz */ #define SNDRV_PCM_RATE_11025 (1U<<2) /* 11025Hz */ #define SNDRV_PCM_RATE_16000 (1U<<3) /* 16000Hz */ #define SNDRV_PCM_RATE_22050 (1U<<4) /* 22050Hz */ #define SNDRV_PCM_RATE_32000 (1U<<5) /* 32000Hz */ #define SNDRV_PCM_RATE_44100 (1U<<6) /* 44100Hz */ #define SNDRV_PCM_RATE_48000 (1U<<7) /* 48000Hz */ #define SNDRV_PCM_RATE_64000 (1U<<8) /* 64000Hz */ #define SNDRV_PCM_RATE_88200 (1U<<9) /* 88200Hz */ #define SNDRV_PCM_RATE_96000 (1U<<10) /* 96000Hz */ #define SNDRV_PCM_RATE_176400 (1U<<11) /* 176400Hz */ #define SNDRV_PCM_RATE_192000 (1U<<12) /* 192000Hz */ #define SNDRV_PCM_RATE_352800 (1U<<13) /* 352800Hz */ #define SNDRV_PCM_RATE_384000 (1U<<14) /* 384000Hz */ #define SNDRV_PCM_RATE_705600 (1U<<15) /* 705600Hz */ #define SNDRV_PCM_RATE_768000 (1U<<16) /* 768000Hz */ /* extended rates since 6.12 */ #define SNDRV_PCM_RATE_12000 (1U<<17) /* 12000Hz */ #define SNDRV_PCM_RATE_24000 (1U<<18) /* 24000Hz */ #define SNDRV_PCM_RATE_128000 (1U<<19) /* 128000Hz */ #define SNDRV_PCM_RATE_CONTINUOUS (1U<<30) /* continuous range */ #define SNDRV_PCM_RATE_KNOT (1U<<31) /* supports more non-continuous rates */ #define SNDRV_PCM_RATE_8000_44100 (SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_11025|\ SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_22050|\ SNDRV_PCM_RATE_32000|SNDRV_PCM_RATE_44100) #define SNDRV_PCM_RATE_8000_48000 (SNDRV_PCM_RATE_8000_44100|SNDRV_PCM_RATE_48000) #define SNDRV_PCM_RATE_8000_96000 (SNDRV_PCM_RATE_8000_48000|SNDRV_PCM_RATE_64000|\ SNDRV_PCM_RATE_88200|SNDRV_PCM_RATE_96000) #define SNDRV_PCM_RATE_8000_192000 (SNDRV_PCM_RATE_8000_96000|SNDRV_PCM_RATE_176400|\ SNDRV_PCM_RATE_192000) #define SNDRV_PCM_RATE_8000_384000 (SNDRV_PCM_RATE_8000_192000|\ SNDRV_PCM_RATE_352800|\ SNDRV_PCM_RATE_384000) #define SNDRV_PCM_RATE_8000_768000 (SNDRV_PCM_RATE_8000_384000|\ SNDRV_PCM_RATE_705600|\ SNDRV_PCM_RATE_768000) #define _SNDRV_PCM_FMTBIT(fmt) (1ULL << (__force int)SNDRV_PCM_FORMAT_##fmt) #define SNDRV_PCM_FMTBIT_S8 _SNDRV_PCM_FMTBIT(S8) #define SNDRV_PCM_FMTBIT_U8 _SNDRV_PCM_FMTBIT(U8) #define SNDRV_PCM_FMTBIT_S16_LE _SNDRV_PCM_FMTBIT(S16_LE) #define SNDRV_PCM_FMTBIT_S16_BE _SNDRV_PCM_FMTBIT(S16_BE) #define SNDRV_PCM_FMTBIT_U16_LE _SNDRV_PCM_FMTBIT(U16_LE) #define SNDRV_PCM_FMTBIT_U16_BE _SNDRV_PCM_FMTBIT(U16_BE) #define SNDRV_PCM_FMTBIT_S24_LE _SNDRV_PCM_FMTBIT(S24_LE) #define SNDRV_PCM_FMTBIT_S24_BE _SNDRV_PCM_FMTBIT(S24_BE) #define SNDRV_PCM_FMTBIT_U24_LE _SNDRV_PCM_FMTBIT(U24_LE) #define SNDRV_PCM_FMTBIT_U24_BE _SNDRV_PCM_FMTBIT(U24_BE) // For S32/U32 formats, 'msbits' hardware parameter is often used to deliver information about the // available bit count in most significant bit. It's for the case of so-called 'left-justified' or // `right-padding` sample which has less width than 32 bit. 
#define SNDRV_PCM_FMTBIT_S32_LE _SNDRV_PCM_FMTBIT(S32_LE) #define SNDRV_PCM_FMTBIT_S32_BE _SNDRV_PCM_FMTBIT(S32_BE) #define SNDRV_PCM_FMTBIT_U32_LE _SNDRV_PCM_FMTBIT(U32_LE) #define SNDRV_PCM_FMTBIT_U32_BE _SNDRV_PCM_FMTBIT(U32_BE) #define SNDRV_PCM_FMTBIT_FLOAT_LE _SNDRV_PCM_FMTBIT(FLOAT_LE) #define SNDRV_PCM_FMTBIT_FLOAT_BE _SNDRV_PCM_FMTBIT(FLOAT_BE) #define SNDRV_PCM_FMTBIT_FLOAT64_LE _SNDRV_PCM_FMTBIT(FLOAT64_LE) #define SNDRV_PCM_FMTBIT_FLOAT64_BE _SNDRV_PCM_FMTBIT(FLOAT64_BE) #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE _SNDRV_PCM_FMTBIT(IEC958_SUBFRAME_LE) #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE _SNDRV_PCM_FMTBIT(IEC958_SUBFRAME_BE) #define SNDRV_PCM_FMTBIT_MU_LAW _SNDRV_PCM_FMTBIT(MU_LAW) #define SNDRV_PCM_FMTBIT_A_LAW _SNDRV_PCM_FMTBIT(A_LAW) #define SNDRV_PCM_FMTBIT_IMA_ADPCM _SNDRV_PCM_FMTBIT(IMA_ADPCM) #define SNDRV_PCM_FMTBIT_MPEG _SNDRV_PCM_FMTBIT(MPEG) #define SNDRV_PCM_FMTBIT_GSM _SNDRV_PCM_FMTBIT(GSM) #define SNDRV_PCM_FMTBIT_S20_LE _SNDRV_PCM_FMTBIT(S20_LE) #define SNDRV_PCM_FMTBIT_U20_LE _SNDRV_PCM_FMTBIT(U20_LE) #define SNDRV_PCM_FMTBIT_S20_BE _SNDRV_PCM_FMTBIT(S20_BE) #define SNDRV_PCM_FMTBIT_U20_BE _SNDRV_PCM_FMTBIT(U20_BE) #define SNDRV_PCM_FMTBIT_SPECIAL _SNDRV_PCM_FMTBIT(SPECIAL) #define SNDRV_PCM_FMTBIT_S24_3LE _SNDRV_PCM_FMTBIT(S24_3LE) #define SNDRV_PCM_FMTBIT_U24_3LE _SNDRV_PCM_FMTBIT(U24_3LE) #define SNDRV_PCM_FMTBIT_S24_3BE _SNDRV_PCM_FMTBIT(S24_3BE) #define SNDRV_PCM_FMTBIT_U24_3BE _SNDRV_PCM_FMTBIT(U24_3BE) #define SNDRV_PCM_FMTBIT_S20_3LE _SNDRV_PCM_FMTBIT(S20_3LE) #define SNDRV_PCM_FMTBIT_U20_3LE _SNDRV_PCM_FMTBIT(U20_3LE) #define SNDRV_PCM_FMTBIT_S20_3BE _SNDRV_PCM_FMTBIT(S20_3BE) #define SNDRV_PCM_FMTBIT_U20_3BE _SNDRV_PCM_FMTBIT(U20_3BE) #define SNDRV_PCM_FMTBIT_S18_3LE _SNDRV_PCM_FMTBIT(S18_3LE) #define SNDRV_PCM_FMTBIT_U18_3LE _SNDRV_PCM_FMTBIT(U18_3LE) #define SNDRV_PCM_FMTBIT_S18_3BE _SNDRV_PCM_FMTBIT(S18_3BE) #define SNDRV_PCM_FMTBIT_U18_3BE _SNDRV_PCM_FMTBIT(U18_3BE) #define SNDRV_PCM_FMTBIT_G723_24 _SNDRV_PCM_FMTBIT(G723_24) #define SNDRV_PCM_FMTBIT_G723_24_1B _SNDRV_PCM_FMTBIT(G723_24_1B) #define SNDRV_PCM_FMTBIT_G723_40 _SNDRV_PCM_FMTBIT(G723_40) #define SNDRV_PCM_FMTBIT_G723_40_1B _SNDRV_PCM_FMTBIT(G723_40_1B) #define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8) #define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE) #define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE) #define SNDRV_PCM_FMTBIT_DSD_U16_BE _SNDRV_PCM_FMTBIT(DSD_U16_BE) #define SNDRV_PCM_FMTBIT_DSD_U32_BE _SNDRV_PCM_FMTBIT(DSD_U32_BE) #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE #define SNDRV_PCM_FMTBIT_U16 SNDRV_PCM_FMTBIT_U16_LE #define SNDRV_PCM_FMTBIT_S24 SNDRV_PCM_FMTBIT_S24_LE #define SNDRV_PCM_FMTBIT_U24 SNDRV_PCM_FMTBIT_U24_LE #define SNDRV_PCM_FMTBIT_S32 SNDRV_PCM_FMTBIT_S32_LE #define SNDRV_PCM_FMTBIT_U32 SNDRV_PCM_FMTBIT_U32_LE #define SNDRV_PCM_FMTBIT_FLOAT SNDRV_PCM_FMTBIT_FLOAT_LE #define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_LE #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE #define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_LE #define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_LE #endif #ifdef SNDRV_BIG_ENDIAN #define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_BE #define SNDRV_PCM_FMTBIT_U16 SNDRV_PCM_FMTBIT_U16_BE #define SNDRV_PCM_FMTBIT_S24 SNDRV_PCM_FMTBIT_S24_BE #define SNDRV_PCM_FMTBIT_U24 SNDRV_PCM_FMTBIT_U24_BE #define SNDRV_PCM_FMTBIT_S32 SNDRV_PCM_FMTBIT_S32_BE #define SNDRV_PCM_FMTBIT_U32 SNDRV_PCM_FMTBIT_U32_BE #define SNDRV_PCM_FMTBIT_FLOAT 
SNDRV_PCM_FMTBIT_FLOAT_BE #define SNDRV_PCM_FMTBIT_FLOAT64 SNDRV_PCM_FMTBIT_FLOAT64_BE #define SNDRV_PCM_FMTBIT_IEC958_SUBFRAME SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE #define SNDRV_PCM_FMTBIT_S20 SNDRV_PCM_FMTBIT_S20_BE #define SNDRV_PCM_FMTBIT_U20 SNDRV_PCM_FMTBIT_U20_BE #endif #define _SNDRV_PCM_SUBFMTBIT(fmt) BIT((__force int)SNDRV_PCM_SUBFORMAT_##fmt) #define SNDRV_PCM_SUBFMTBIT_STD _SNDRV_PCM_SUBFMTBIT(STD) #define SNDRV_PCM_SUBFMTBIT_MSBITS_MAX _SNDRV_PCM_SUBFMTBIT(MSBITS_MAX) #define SNDRV_PCM_SUBFMTBIT_MSBITS_20 _SNDRV_PCM_SUBFMTBIT(MSBITS_20) #define SNDRV_PCM_SUBFMTBIT_MSBITS_24 _SNDRV_PCM_SUBFMTBIT(MSBITS_24) struct snd_pcm_file { struct snd_pcm_substream *substream; int no_compat_mmap; unsigned int user_pversion; /* supported protocol version */ }; struct snd_pcm_hw_rule; typedef int (*snd_pcm_hw_rule_func_t)(struct snd_pcm_hw_params *params, struct snd_pcm_hw_rule *rule); struct snd_pcm_hw_rule { unsigned int cond; int var; int deps[5]; snd_pcm_hw_rule_func_t func; void *private; }; struct snd_pcm_hw_constraints { struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - SNDRV_PCM_HW_PARAM_FIRST_MASK + 1]; struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL + 1]; unsigned int rules_num; unsigned int rules_all; struct snd_pcm_hw_rule *rules; }; static inline struct snd_mask *constrs_mask(struct snd_pcm_hw_constraints *constrs, snd_pcm_hw_param_t var) { return &constrs->masks[var - SNDRV_PCM_HW_PARAM_FIRST_MASK]; } static inline struct snd_interval *constrs_interval(struct snd_pcm_hw_constraints *constrs, snd_pcm_hw_param_t var) { return &constrs->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; } struct snd_ratnum { unsigned int num; unsigned int den_min, den_max, den_step; }; struct snd_ratden { unsigned int num_min, num_max, num_step; unsigned int den; }; struct snd_pcm_hw_constraint_ratnums { int nrats; const struct snd_ratnum *rats; }; struct snd_pcm_hw_constraint_ratdens { int nrats; const struct snd_ratden *rats; }; struct snd_pcm_hw_constraint_list { const unsigned int *list; unsigned int count; unsigned int mask; }; struct snd_pcm_hw_constraint_ranges { unsigned int count; const struct snd_interval *ranges; unsigned int mask; }; /* * userspace-provided audio timestamp config to kernel, * structure is for internal use only and filled with dedicated unpack routine */ struct snd_pcm_audio_tstamp_config { /* 5 of max 16 bits used */ u32 type_requested:4; u32 report_delay:1; /* add total delay to A/D or D/A */ }; static inline void snd_pcm_unpack_audio_tstamp_config(__u32 data, struct snd_pcm_audio_tstamp_config *config) { config->type_requested = data & 0xF; config->report_delay = (data >> 4) & 1; } /* * kernel-provided audio timestamp report to user-space * structure is for internal use only and read by dedicated pack routine */ struct snd_pcm_audio_tstamp_report { /* 6 of max 16 bits used for bit-fields */ /* for backwards compatibility */ u32 valid:1; /* actual type if hardware could not support requested timestamp */ u32 actual_type:4; /* accuracy represented in ns units */ u32 accuracy_report:1; /* 0 if accuracy unknown, 1 if accuracy field is valid */ u32 accuracy; /* up to 4.29s, will be packed in separate field */ }; static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy, const struct snd_pcm_audio_tstamp_report *report) { u32 tmp; tmp = report->accuracy_report; tmp <<= 4; tmp |= report->actual_type; tmp <<= 1; tmp |= report->valid; *data &= 0xffff; /* zero-clear MSBs */ *data |= (tmp << 16); 
*accuracy = report->accuracy; } struct snd_pcm_runtime { /* -- Status -- */ snd_pcm_state_t state; /* stream state */ snd_pcm_state_t suspended_state; /* suspended stream state */ struct snd_pcm_substream *trigger_master; struct timespec64 trigger_tstamp; /* trigger timestamp */ bool trigger_tstamp_latched; /* trigger timestamp latched in low-level driver/hardware */ int overrange; snd_pcm_uframes_t avail_max; snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */ snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */ unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */ unsigned long hw_ptr_buffer_jiffies; /* buffer time in jiffies */ snd_pcm_sframes_t delay; /* extra delay; typically FIFO size */ u64 hw_ptr_wrap; /* offset for hw_ptr due to boundary wrap-around */ /* -- HW params -- */ snd_pcm_access_t access; /* access mode */ snd_pcm_format_t format; /* SNDRV_PCM_FORMAT_* */ snd_pcm_subformat_t subformat; /* subformat */ unsigned int rate; /* rate in Hz */ unsigned int channels; /* channels */ snd_pcm_uframes_t period_size; /* period size */ unsigned int periods; /* periods */ snd_pcm_uframes_t buffer_size; /* buffer size */ snd_pcm_uframes_t min_align; /* Min alignment for the format */ size_t byte_align; unsigned int frame_bits; unsigned int sample_bits; unsigned int info; unsigned int rate_num; unsigned int rate_den; unsigned int no_period_wakeup: 1; /* -- SW params; see struct snd_pcm_sw_params for comments -- */ int tstamp_mode; unsigned int period_step; snd_pcm_uframes_t start_threshold; snd_pcm_uframes_t stop_threshold; snd_pcm_uframes_t silence_threshold; snd_pcm_uframes_t silence_size; snd_pcm_uframes_t boundary; /* internal data of auto-silencer */ snd_pcm_uframes_t silence_start; /* starting pointer to silence area */ snd_pcm_uframes_t silence_filled; /* already filled part of silence area */ bool std_sync_id; /* hardware synchronization - standard per card ID */ /* -- mmap -- */ struct snd_pcm_mmap_status *status; struct snd_pcm_mmap_control *control; /* -- locking / scheduling -- */ snd_pcm_uframes_t twake; /* do transfer (!poll) wakeup if non-zero */ wait_queue_head_t sleep; /* poll sleep */ wait_queue_head_t tsleep; /* transfer sleep */ struct snd_fasync *fasync; bool stop_operating; /* sync_stop will be called */ struct mutex buffer_mutex; /* protect for buffer changes */ atomic_t buffer_accessing; /* >0: in r/w operation, <0: blocked */ /* -- private section -- */ void *private_data; void (*private_free)(struct snd_pcm_runtime *runtime); /* -- hardware description -- */ struct snd_pcm_hardware hw; struct snd_pcm_hw_constraints hw_constraints; /* -- timer -- */ unsigned int timer_resolution; /* timer resolution */ int tstamp_type; /* timestamp type */ /* -- DMA -- */ unsigned char *dma_area; /* DMA area */ dma_addr_t dma_addr; /* physical bus address (not accessible from main CPU) */ size_t dma_bytes; /* size of DMA area */ struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */ unsigned int buffer_changed:1; /* buffer allocation changed; set only in managed mode */ /* -- audio timestamp config -- */ struct snd_pcm_audio_tstamp_config audio_tstamp_config; struct snd_pcm_audio_tstamp_report audio_tstamp_report; struct timespec64 driver_tstamp; #if IS_ENABLED(CONFIG_SND_PCM_OSS) /* -- OSS things -- */ struct snd_pcm_oss_runtime oss; #endif }; struct snd_pcm_group { /* keep linked substreams */ spinlock_t lock; struct mutex mutex; struct list_head substreams; refcount_t refs; }; struct pid; struct snd_pcm_substream { struct snd_pcm 
*pcm; struct snd_pcm_str *pstr; void *private_data; /* copied from pcm->private_data */ int number; char name[32]; /* substream name */ int stream; /* stream (direction) */ struct pm_qos_request latency_pm_qos_req; /* pm_qos request */ size_t buffer_bytes_max; /* limit ring buffer size */ struct snd_dma_buffer dma_buffer; size_t dma_max; /* -- hardware operations -- */ const struct snd_pcm_ops *ops; /* -- runtime information -- */ struct snd_pcm_runtime *runtime; /* -- timer section -- */ struct snd_timer *timer; /* timer */ unsigned timer_running: 1; /* time is running */ long wait_time; /* time in ms for R/W to wait for avail */ /* -- next substream -- */ struct snd_pcm_substream *next; /* -- linked substreams -- */ struct list_head link_list; /* linked list member */ struct snd_pcm_group self_group; /* fake group for non linked substream (with substream lock inside) */ struct snd_pcm_group *group; /* pointer to current group */ /* -- assigned files -- */ int ref_count; atomic_t mmap_count; unsigned int f_flags; void (*pcm_release)(struct snd_pcm_substream *); struct pid *pid; #if IS_ENABLED(CONFIG_SND_PCM_OSS) /* -- OSS things -- */ struct snd_pcm_oss_substream oss; #endif #ifdef CONFIG_SND_VERBOSE_PROCFS struct snd_info_entry *proc_root; #endif /* CONFIG_SND_VERBOSE_PROCFS */ /* misc flags */ unsigned int hw_opened: 1; unsigned int managed_buffer_alloc:1; #ifdef CONFIG_SND_PCM_XRUN_DEBUG unsigned int xrun_counter; /* number of times xrun happens */ #endif /* CONFIG_SND_PCM_XRUN_DEBUG */ }; #define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0) struct snd_pcm_str { int stream; /* stream (direction) */ struct snd_pcm *pcm; /* -- substreams -- */ unsigned int substream_count; unsigned int substream_opened; struct snd_pcm_substream *substream; #if IS_ENABLED(CONFIG_SND_PCM_OSS) /* -- OSS things -- */ struct snd_pcm_oss_stream oss; #endif #ifdef CONFIG_SND_VERBOSE_PROCFS struct snd_info_entry *proc_root; #ifdef CONFIG_SND_PCM_XRUN_DEBUG unsigned int xrun_debug; /* 0 = disabled, 1 = verbose, 2 = stacktrace */ #endif #endif struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */ struct device *dev; }; struct snd_pcm { struct snd_card *card; struct list_head list; int device; /* device number */ unsigned int info_flags; unsigned short dev_class; unsigned short dev_subclass; char id[64]; char name[80]; struct snd_pcm_str streams[2]; struct mutex open_mutex; wait_queue_head_t open_wait; void *private_data; void (*private_free) (struct snd_pcm *pcm); bool internal; /* pcm is for internal use only */ bool nonatomic; /* whole PCM operations are in non-atomic context */ bool no_device_suspend; /* don't invoke device PM suspend */ #if IS_ENABLED(CONFIG_SND_PCM_OSS) struct snd_pcm_oss oss; #endif }; /* * Registering */ extern const struct file_operations snd_pcm_f_ops[2]; int snd_pcm_new(struct snd_card *card, const char *id, int device, int playback_count, int capture_count, struct snd_pcm **rpcm); int snd_pcm_new_internal(struct snd_card *card, const char *id, int device, int playback_count, int capture_count, struct snd_pcm **rpcm); int snd_pcm_new_stream(struct snd_pcm *pcm, int stream, int substream_count); #if IS_ENABLED(CONFIG_SND_PCM_OSS) struct snd_pcm_notify { int (*n_register) (struct snd_pcm * pcm); int (*n_disconnect) (struct snd_pcm * pcm); int (*n_unregister) (struct snd_pcm * pcm); struct list_head list; }; int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree); #endif /* * Native I/O */ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info 
*info); int snd_pcm_info_user(struct snd_pcm_substream *substream, struct snd_pcm_info __user *info); int snd_pcm_status64(struct snd_pcm_substream *substream, struct snd_pcm_status64 *status); int snd_pcm_start(struct snd_pcm_substream *substream); int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t status); int snd_pcm_drain_done(struct snd_pcm_substream *substream); int snd_pcm_stop_xrun(struct snd_pcm_substream *substream); #ifdef CONFIG_PM int snd_pcm_suspend_all(struct snd_pcm *pcm); #else static inline int snd_pcm_suspend_all(struct snd_pcm *pcm) { return 0; } #endif int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); int snd_pcm_open_substream(struct snd_pcm *pcm, int stream, struct file *file, struct snd_pcm_substream **rsubstream); void snd_pcm_release_substream(struct snd_pcm_substream *substream); int snd_pcm_attach_substream(struct snd_pcm *pcm, int stream, struct file *file, struct snd_pcm_substream **rsubstream); void snd_pcm_detach_substream(struct snd_pcm_substream *substream); int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area); #ifdef CONFIG_SND_DEBUG void snd_pcm_debug_name(struct snd_pcm_substream *substream, char *name, size_t len); #else static inline void snd_pcm_debug_name(struct snd_pcm_substream *substream, char *buf, size_t size) { *buf = 0; } #endif /* * PCM library */ /** * snd_pcm_stream_linked - Check whether the substream is linked with others * @substream: substream to check * * Return: true if the given substream is being linked with others */ static inline int snd_pcm_stream_linked(struct snd_pcm_substream *substream) { return substream->group != &substream->self_group; } void snd_pcm_stream_lock(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock(struct snd_pcm_substream *substream); void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream); unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream); /** * snd_pcm_stream_lock_irqsave - Lock the PCM stream * @substream: PCM substream * @flags: irq flags * * This locks the PCM stream like snd_pcm_stream_lock() but with the local * IRQ (only when nonatomic is false). In nonatomic case, this is identical * as snd_pcm_stream_lock(). */ #define snd_pcm_stream_lock_irqsave(substream, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _snd_pcm_stream_lock_irqsave(substream); \ } while (0) void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream, unsigned long flags); /** * snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking * @substream: PCM substream * @flags: irq flags * * This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with * the single-depth lockdep subclass. 
*/ #define snd_pcm_stream_lock_irqsave_nested(substream, flags) \ do { \ typecheck(unsigned long, flags); \ flags = _snd_pcm_stream_lock_irqsave_nested(substream); \ } while (0) /* definitions for guard(); use like guard(pcm_stream_lock) */ DEFINE_LOCK_GUARD_1(pcm_stream_lock, struct snd_pcm_substream, snd_pcm_stream_lock(_T->lock), snd_pcm_stream_unlock(_T->lock)) DEFINE_LOCK_GUARD_1(pcm_stream_lock_irq, struct snd_pcm_substream, snd_pcm_stream_lock_irq(_T->lock), snd_pcm_stream_unlock_irq(_T->lock)) DEFINE_LOCK_GUARD_1(pcm_stream_lock_irqsave, struct snd_pcm_substream, snd_pcm_stream_lock_irqsave(_T->lock, _T->flags), snd_pcm_stream_unlock_irqrestore(_T->lock, _T->flags), unsigned long flags) /** * snd_pcm_group_for_each_entry - iterate over the linked substreams * @s: the iterator * @substream: the substream * * Iterate over the all linked substreams to the given @substream. * When @substream isn't linked with any others, this gives returns @substream * itself once. */ #define snd_pcm_group_for_each_entry(s, substream) \ list_for_each_entry(s, &substream->group->substreams, link_list) #define for_each_pcm_streams(stream) \ for (stream = SNDRV_PCM_STREAM_PLAYBACK; \ stream <= SNDRV_PCM_STREAM_LAST; \ stream++) /** * snd_pcm_running - Check whether the substream is in a running state * @substream: substream to check * * Return: true if the given substream is in the state RUNNING, or in the * state DRAINING for playback. */ static inline int snd_pcm_running(struct snd_pcm_substream *substream) { return (substream->runtime->state == SNDRV_PCM_STATE_RUNNING || (substream->runtime->state == SNDRV_PCM_STATE_DRAINING && substream->stream == SNDRV_PCM_STREAM_PLAYBACK)); } /** * __snd_pcm_set_state - Change the current PCM state * @runtime: PCM runtime to set * @state: the current state to set * * Call within the stream lock */ static inline void __snd_pcm_set_state(struct snd_pcm_runtime *runtime, snd_pcm_state_t state) { runtime->state = state; runtime->status->state = state; /* copy for mmap */ } /** * bytes_to_samples - Unit conversion of the size from bytes to samples * @runtime: PCM runtime instance * @size: size in bytes * * Return: the size in samples */ static inline ssize_t bytes_to_samples(struct snd_pcm_runtime *runtime, ssize_t size) { return size * 8 / runtime->sample_bits; } /** * bytes_to_frames - Unit conversion of the size from bytes to frames * @runtime: PCM runtime instance * @size: size in bytes * * Return: the size in frames */ static inline snd_pcm_sframes_t bytes_to_frames(struct snd_pcm_runtime *runtime, ssize_t size) { return size * 8 / runtime->frame_bits; } /** * samples_to_bytes - Unit conversion of the size from samples to bytes * @runtime: PCM runtime instance * @size: size in samples * * Return: the byte size */ static inline ssize_t samples_to_bytes(struct snd_pcm_runtime *runtime, ssize_t size) { return size * runtime->sample_bits / 8; } /** * frames_to_bytes - Unit conversion of the size from frames to bytes * @runtime: PCM runtime instance * @size: size in frames * * Return: the byte size */ static inline ssize_t frames_to_bytes(struct snd_pcm_runtime *runtime, snd_pcm_sframes_t size) { return size * runtime->frame_bits / 8; } /** * frame_aligned - Check whether the byte size is aligned to frames * @runtime: PCM runtime instance * @bytes: size in bytes * * Return: true if aligned, or false if not */ static inline int frame_aligned(struct snd_pcm_runtime *runtime, ssize_t bytes) { return bytes % runtime->byte_align == 0; } /** * snd_pcm_lib_buffer_bytes - Get 
the buffer size of the current PCM in bytes * @substream: PCM substream * * Return: buffer byte size */ static inline size_t snd_pcm_lib_buffer_bytes(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return frames_to_bytes(runtime, runtime->buffer_size); } /** * snd_pcm_lib_period_bytes - Get the period size of the current PCM in bytes * @substream: PCM substream * * Return: period byte size */ static inline size_t snd_pcm_lib_period_bytes(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return frames_to_bytes(runtime, runtime->period_size); } /** * snd_pcm_playback_avail - Get the available (writable) space for playback * @runtime: PCM runtime instance * * Result is between 0 ... (boundary - 1) * * Return: available frame size */ static inline snd_pcm_uframes_t snd_pcm_playback_avail(struct snd_pcm_runtime *runtime) { snd_pcm_sframes_t avail = runtime->status->hw_ptr + runtime->buffer_size - runtime->control->appl_ptr; if (avail < 0) avail += runtime->boundary; else if ((snd_pcm_uframes_t) avail >= runtime->boundary) avail -= runtime->boundary; return avail; } /** * snd_pcm_capture_avail - Get the available (readable) space for capture * @runtime: PCM runtime instance * * Result is between 0 ... (boundary - 1) * * Return: available frame size */ static inline snd_pcm_uframes_t snd_pcm_capture_avail(struct snd_pcm_runtime *runtime) { snd_pcm_sframes_t avail = runtime->status->hw_ptr - runtime->control->appl_ptr; if (avail < 0) avail += runtime->boundary; return avail; } /** * snd_pcm_playback_hw_avail - Get the queued space for playback * @runtime: PCM runtime instance * * Return: available frame size */ static inline snd_pcm_sframes_t snd_pcm_playback_hw_avail(struct snd_pcm_runtime *runtime) { return runtime->buffer_size - snd_pcm_playback_avail(runtime); } /** * snd_pcm_capture_hw_avail - Get the free space for capture * @runtime: PCM runtime instance * * Return: available frame size */ static inline snd_pcm_sframes_t snd_pcm_capture_hw_avail(struct snd_pcm_runtime *runtime) { return runtime->buffer_size - snd_pcm_capture_avail(runtime); } /** * snd_pcm_playback_ready - check whether the playback buffer is available * @substream: the pcm substream instance * * Checks whether enough free space is available on the playback buffer. * * Return: Non-zero if available, or zero if not. */ static inline int snd_pcm_playback_ready(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return snd_pcm_playback_avail(runtime) >= runtime->control->avail_min; } /** * snd_pcm_capture_ready - check whether the capture buffer is available * @substream: the pcm substream instance * * Checks whether enough capture data is available on the capture buffer. * * Return: Non-zero if available, or zero if not. */ static inline int snd_pcm_capture_ready(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return snd_pcm_capture_avail(runtime) >= runtime->control->avail_min; } /** * snd_pcm_playback_data - check whether any data exists on the playback buffer * @substream: the pcm substream instance * * Checks whether any data exists on the playback buffer. * * Return: Non-zero if any data exists, or zero if not. If stop_threshold * is bigger or equal to boundary, then this function returns always non-zero. 
*/ static inline int snd_pcm_playback_data(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->stop_threshold >= runtime->boundary) return 1; return snd_pcm_playback_avail(runtime) < runtime->buffer_size; } /** * snd_pcm_playback_empty - check whether the playback buffer is empty * @substream: the pcm substream instance * * Checks whether the playback buffer is empty. * * Return: Non-zero if empty, or zero if not. */ static inline int snd_pcm_playback_empty(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return snd_pcm_playback_avail(runtime) >= runtime->buffer_size; } /** * snd_pcm_capture_empty - check whether the capture buffer is empty * @substream: the pcm substream instance * * Checks whether the capture buffer is empty. * * Return: Non-zero if empty, or zero if not. */ static inline int snd_pcm_capture_empty(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; return snd_pcm_capture_avail(runtime) == 0; } /** * snd_pcm_trigger_done - Mark the master substream * @substream: the pcm substream instance * @master: the linked master substream * * When multiple substreams of the same card are linked and the hardware * supports the single-shot operation, the driver calls this in the loop * in snd_pcm_group_for_each_entry() for marking the substream as "done". * Then most of trigger operations are performed only to the given master * substream. * * The trigger_master mark is cleared at timestamp updates at the end * of trigger operations. */ static inline void snd_pcm_trigger_done(struct snd_pcm_substream *substream, struct snd_pcm_substream *master) { substream->runtime->trigger_master = master; } static inline int hw_is_mask(int var) { return var >= SNDRV_PCM_HW_PARAM_FIRST_MASK && var <= SNDRV_PCM_HW_PARAM_LAST_MASK; } static inline int hw_is_interval(int var) { return var >= SNDRV_PCM_HW_PARAM_FIRST_INTERVAL && var <= SNDRV_PCM_HW_PARAM_LAST_INTERVAL; } static inline struct snd_mask *hw_param_mask(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { return ¶ms->masks[var - SNDRV_PCM_HW_PARAM_FIRST_MASK]; } static inline struct snd_interval *hw_param_interval(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { return ¶ms->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; } static inline const struct snd_mask *hw_param_mask_c(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { return ¶ms->masks[var - SNDRV_PCM_HW_PARAM_FIRST_MASK]; } static inline const struct snd_interval *hw_param_interval_c(const struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var) { return ¶ms->intervals[var - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL]; } /** * params_channels - Get the number of channels from the hw params * @p: hw params * * Return: the number of channels */ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_CHANNELS)->min; } /** * params_rate - Get the sample rate from the hw params * @p: hw params * * Return: the sample rate */ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_RATE)->min; } /** * params_period_size - Get the period size (in frames) from the hw params * @p: hw params * * Return: the period size in frames */ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIOD_SIZE)->min; } /** * 
params_periods - Get the number of periods from the hw params * @p: hw params * * Return: the number of periods */ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_PERIODS)->min; } /** * params_buffer_size - Get the buffer size (in frames) from the hw params * @p: hw params * * Return: the buffer size in frames */ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_SIZE)->min; } /** * params_buffer_bytes - Get the buffer size (in bytes) from the hw params * @p: hw params * * Return: the buffer size in bytes */ static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p) { return hw_param_interval_c(p, SNDRV_PCM_HW_PARAM_BUFFER_BYTES)->min; } int snd_interval_refine(struct snd_interval *i, const struct snd_interval *v); int snd_interval_list(struct snd_interval *i, unsigned int count, const unsigned int *list, unsigned int mask); int snd_interval_ranges(struct snd_interval *i, unsigned int count, const struct snd_interval *list, unsigned int mask); int snd_interval_ratnum(struct snd_interval *i, unsigned int rats_count, const struct snd_ratnum *rats, unsigned int *nump, unsigned int *denp); void _snd_pcm_hw_params_any(struct snd_pcm_hw_params *params); void _snd_pcm_hw_param_setempty(struct snd_pcm_hw_params *params, snd_pcm_hw_param_t var); int snd_pcm_hw_refine(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params); int snd_pcm_hw_constraint_mask64(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, u_int64_t mask); int snd_pcm_hw_constraint_minmax(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, unsigned int min, unsigned int max); int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var); int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_list *l); int snd_pcm_hw_constraint_ranges(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ranges *r); int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ratnums *r); int snd_pcm_hw_constraint_ratdens(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, const struct snd_pcm_hw_constraint_ratdens *r); int snd_pcm_hw_constraint_msbits(struct snd_pcm_runtime *runtime, unsigned int cond, unsigned int width, unsigned int msbits); int snd_pcm_hw_constraint_step(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var, unsigned long step); int snd_pcm_hw_constraint_pow2(struct snd_pcm_runtime *runtime, unsigned int cond, snd_pcm_hw_param_t var); int snd_pcm_hw_rule_noresample(struct snd_pcm_runtime *runtime, unsigned int base_rate); int snd_pcm_hw_rule_add(struct snd_pcm_runtime *runtime, unsigned int cond, int var, snd_pcm_hw_rule_func_t func, void *private, int dep, ...); /** * snd_pcm_hw_constraint_single() - Constrain parameter to a single value * @runtime: PCM runtime instance * @var: The hw_params variable to constrain * @val: The value to constrain to * * Return: Positive if the value is changed, zero if it's not changed, or a * negative error code. 
*/ static inline int snd_pcm_hw_constraint_single( struct snd_pcm_runtime *runtime, snd_pcm_hw_param_t var, unsigned int val) { return snd_pcm_hw_constraint_minmax(runtime, var, val, val); } int snd_pcm_format_signed(snd_pcm_format_t format); int snd_pcm_format_unsigned(snd_pcm_format_t format); int snd_pcm_format_linear(snd_pcm_format_t format); int snd_pcm_format_little_endian(snd_pcm_format_t format); int snd_pcm_format_big_endian(snd_pcm_format_t format); #if 0 /* just for kernel-doc */ /** * snd_pcm_format_cpu_endian - Check the PCM format is CPU-endian * @format: the format to check * * Return: 1 if the given PCM format is CPU-endian, 0 if * opposite, or a negative error code if endian not specified. */ int snd_pcm_format_cpu_endian(snd_pcm_format_t format); #endif /* DocBook */ #ifdef SNDRV_LITTLE_ENDIAN #define snd_pcm_format_cpu_endian(format) snd_pcm_format_little_endian(format) #else #define snd_pcm_format_cpu_endian(format) snd_pcm_format_big_endian(format) #endif int snd_pcm_format_width(snd_pcm_format_t format); /* in bits */ int snd_pcm_format_physical_width(snd_pcm_format_t format); /* in bits */ ssize_t snd_pcm_format_size(snd_pcm_format_t format, size_t samples); const unsigned char *snd_pcm_format_silence_64(snd_pcm_format_t format); int snd_pcm_format_set_silence(snd_pcm_format_t format, void *buf, unsigned int frames); void snd_pcm_set_ops(struct snd_pcm * pcm, int direction, const struct snd_pcm_ops *ops); void snd_pcm_set_sync_per_card(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, const unsigned char *id, unsigned int len); /** * snd_pcm_set_sync - set the PCM sync id * @substream: the pcm substream * * Use the default PCM sync identifier for the specific card. */ static inline void snd_pcm_set_sync(struct snd_pcm_substream *substream) { substream->runtime->std_sync_id = true; } int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg); void snd_pcm_period_elapsed_under_stream_lock(struct snd_pcm_substream *substream); void snd_pcm_period_elapsed(struct snd_pcm_substream *substream); snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, void *buf, bool interleaved, snd_pcm_uframes_t frames, bool in_kernel); static inline snd_pcm_sframes_t snd_pcm_lib_write(struct snd_pcm_substream *substream, const void __user *buf, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false); } static inline snd_pcm_sframes_t snd_pcm_lib_read(struct snd_pcm_substream *substream, void __user *buf, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false); } static inline snd_pcm_sframes_t snd_pcm_lib_writev(struct snd_pcm_substream *substream, void __user **bufs, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void *)bufs, false, frames, false); } static inline snd_pcm_sframes_t snd_pcm_lib_readv(struct snd_pcm_substream *substream, void __user **bufs, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void *)bufs, false, frames, false); } static inline snd_pcm_sframes_t snd_pcm_kernel_write(struct snd_pcm_substream *substream, const void *buf, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, true); } static inline snd_pcm_sframes_t snd_pcm_kernel_read(struct snd_pcm_substream *substream, void *buf, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, buf, true, frames, true); } static inline snd_pcm_sframes_t 
snd_pcm_kernel_writev(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, bufs, false, frames, true); } static inline snd_pcm_sframes_t snd_pcm_kernel_readv(struct snd_pcm_substream *substream, void **bufs, snd_pcm_uframes_t frames) { return __snd_pcm_lib_xfer(substream, bufs, false, frames, true); } int snd_pcm_hw_limit_rates(struct snd_pcm_hardware *hw); static inline int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime) { return snd_pcm_hw_limit_rates(&runtime->hw); } unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate); unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit); unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a, unsigned int rates_b); /** * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer * @substream: PCM substream to set * @bufp: the buffer information, NULL to clear * * Copy the buffer information to runtime->dma_buffer when @bufp is non-NULL. * Otherwise it clears the current buffer information. */ static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream, struct snd_dma_buffer *bufp) { struct snd_pcm_runtime *runtime = substream->runtime; if (bufp) { runtime->dma_buffer_p = bufp; runtime->dma_area = bufp->area; runtime->dma_addr = bufp->addr; runtime->dma_bytes = bufp->bytes; } else { runtime->dma_buffer_p = NULL; runtime->dma_area = NULL; runtime->dma_addr = 0; runtime->dma_bytes = 0; } } /** * snd_pcm_gettime - Fill the timespec64 depending on the timestamp mode * @runtime: PCM runtime instance * @tv: timespec64 to fill */ static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime, struct timespec64 *tv) { switch (runtime->tstamp_type) { case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC: ktime_get_ts64(tv); break; case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW: ktime_get_raw_ts64(tv); break; default: ktime_get_real_ts64(tv); break; } } /* * Memory */ void snd_pcm_lib_preallocate_free(struct snd_pcm_substream *substream); void snd_pcm_lib_preallocate_free_for_all(struct snd_pcm *pcm); void snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream, int type, struct device *data, size_t size, size_t max); void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm, int type, void *data, size_t size, size_t max); int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size); int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream); int snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type, struct device *data, size_t size, size_t max); int snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type, struct device *data, size_t size, size_t max); /** * snd_pcm_set_fixed_buffer - Preallocate and set up the fixed size PCM buffer * @substream: the pcm substream instance * @type: DMA type (SNDRV_DMA_TYPE_*) * @data: DMA type dependent data * @size: the requested pre-allocation size in bytes * * This is a variant of snd_pcm_set_managed_buffer(), but this pre-allocates * only the given sized buffer and doesn't allow re-allocation nor dynamic * allocation of a larger buffer unlike the standard one. * The function may return -ENOMEM error, hence the caller must check it. 
* * Return: zero if successful, or a negative error code */ static inline int __must_check snd_pcm_set_fixed_buffer(struct snd_pcm_substream *substream, int type, struct device *data, size_t size) { return snd_pcm_set_managed_buffer(substream, type, data, size, 0); } /** * snd_pcm_set_fixed_buffer_all - Preallocate and set up the fixed size PCM buffer * @pcm: the pcm instance * @type: DMA type (SNDRV_DMA_TYPE_*) * @data: DMA type dependent data * @size: the requested pre-allocation size in bytes * * Apply the set up of the fixed buffer via snd_pcm_set_fixed_buffer() for * all substream. If any of allocation fails, it returns -ENOMEM, hence the * caller must check the return value. * * Return: zero if successful, or a negative error code */ static inline int __must_check snd_pcm_set_fixed_buffer_all(struct snd_pcm *pcm, int type, struct device *data, size_t size) { return snd_pcm_set_managed_buffer_all(pcm, type, data, size, 0); } #define snd_pcm_get_dma_buf(substream) ((substream)->runtime->dma_buffer_p) /** * snd_pcm_sgbuf_get_addr - Get the DMA address at the corresponding offset * @substream: PCM substream * @ofs: byte offset * * Return: DMA address */ static inline dma_addr_t snd_pcm_sgbuf_get_addr(struct snd_pcm_substream *substream, unsigned int ofs) { return snd_sgbuf_get_addr(snd_pcm_get_dma_buf(substream), ofs); } /** * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the * contig. page from the given size * @substream: PCM substream * @ofs: byte offset * @size: byte size to examine * * Return: chunk size */ static inline unsigned int snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, unsigned int ofs, unsigned int size) { return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size); } int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *area); /* mmap for io-memory area */ #if defined(CONFIG_X86) || defined(CONFIG_PPC) || defined(CONFIG_ALPHA) #define SNDRV_PCM_INFO_MMAP_IOMEM SNDRV_PCM_INFO_MMAP int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area); #else #define SNDRV_PCM_INFO_MMAP_IOMEM 0 #define snd_pcm_lib_mmap_iomem NULL #endif void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime); /** * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer * @dma: DMA number * @max: pointer to store the max size */ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) { *max = dma < 4 ? 64 * 1024 : 128 * 1024; } /* * Misc */ #define SNDRV_PCM_DEFAULT_CON_SPDIF (IEC958_AES0_CON_EMPHASIS_NONE|\ (IEC958_AES1_CON_ORIGINAL<<8)|\ (IEC958_AES1_CON_PCM_CODER<<8)|\ (IEC958_AES3_CON_FS_48000<<24)) const char *snd_pcm_format_name(snd_pcm_format_t format); /** * snd_pcm_direction_name - Get a string naming the direction of a stream * @direction: Stream's direction, one of SNDRV_PCM_STREAM_XXX * * Returns a string naming the direction of the stream. */ static inline const char *snd_pcm_direction_name(int direction) { if (direction == SNDRV_PCM_STREAM_PLAYBACK) return "Playback"; else return "Capture"; } /** * snd_pcm_stream_str - Get a string naming the direction of a stream * @substream: the pcm substream instance * * Return: A string naming the direction of the stream. 
*/ static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream) { return snd_pcm_direction_name(substream->stream); } /* * PCM channel-mapping control API */ /* array element of channel maps */ struct snd_pcm_chmap_elem { unsigned char channels; unsigned char map[15]; }; /* channel map information; retrieved via snd_kcontrol_chip() */ struct snd_pcm_chmap { struct snd_pcm *pcm; /* assigned PCM instance */ int stream; /* PLAYBACK or CAPTURE */ struct snd_kcontrol *kctl; const struct snd_pcm_chmap_elem *chmap; unsigned int max_channels; unsigned int channel_mask; /* optional: active channels bitmask */ void *private_data; /* optional: private data pointer */ }; /** * snd_pcm_chmap_substream - get the PCM substream assigned to the given chmap info * @info: chmap information * @idx: the substream number index * * Return: the matched PCM substream, or NULL if not found */ static inline struct snd_pcm_substream * snd_pcm_chmap_substream(struct snd_pcm_chmap *info, unsigned int idx) { struct snd_pcm_substream *s; for (s = info->pcm->streams[info->stream].substream; s; s = s->next) if (s->number == idx) return s; return NULL; } /* ALSA-standard channel maps (RL/RR prior to C/LFE) */ extern const struct snd_pcm_chmap_elem snd_pcm_std_chmaps[]; /* Other world's standard channel maps (C/LFE prior to RL/RR) */ extern const struct snd_pcm_chmap_elem snd_pcm_alt_chmaps[]; /* bit masks to be passed to snd_pcm_chmap.channel_mask field */ #define SND_PCM_CHMAP_MASK_24 ((1U << 2) | (1U << 4)) #define SND_PCM_CHMAP_MASK_246 (SND_PCM_CHMAP_MASK_24 | (1U << 6)) #define SND_PCM_CHMAP_MASK_2468 (SND_PCM_CHMAP_MASK_246 | (1U << 8)) int snd_pcm_add_chmap_ctls(struct snd_pcm *pcm, int stream, const struct snd_pcm_chmap_elem *chmap, int max_channels, unsigned long private_value, struct snd_pcm_chmap **info_ret); /** * pcm_format_to_bits - Strong-typed conversion of pcm_format to bitwise * @pcm_format: PCM format * * Return: 64bit mask corresponding to the given PCM format */ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format) { return 1ULL << (__force int) pcm_format; } /** * pcm_for_each_format - helper to iterate for each format type * @f: the iterator variable in snd_pcm_format_t type */ #define pcm_for_each_format(f) \ for ((f) = SNDRV_PCM_FORMAT_FIRST; \ (__force int)(f) <= (__force int)SNDRV_PCM_FORMAT_LAST; \ (f) = (__force snd_pcm_format_t)((__force int)(f) + 1)) /* printk helpers */ #define pcm_err(pcm, fmt, args...) \ dev_err((pcm)->card->dev, fmt, ##args) #define pcm_warn(pcm, fmt, args...) \ dev_warn((pcm)->card->dev, fmt, ##args) #define pcm_dbg(pcm, fmt, args...) 
\ dev_dbg((pcm)->card->dev, fmt, ##args) /* helpers for copying between iov_iter and iomem */ size_t copy_to_iter_fromio(const void __iomem *src, size_t bytes, struct iov_iter *iter) __must_check; size_t copy_from_iter_toio(void __iomem *dst, size_t bytes, struct iov_iter *iter) __must_check; struct snd_pcm_status64 { snd_pcm_state_t state; /* stream state */ u8 rsvd[4]; s64 trigger_tstamp_sec; /* time when stream was started/stopped/paused */ s64 trigger_tstamp_nsec; s64 tstamp_sec; /* reference timestamp */ s64 tstamp_nsec; snd_pcm_uframes_t appl_ptr; /* appl ptr */ snd_pcm_uframes_t hw_ptr; /* hw ptr */ snd_pcm_sframes_t delay; /* current delay in frames */ snd_pcm_uframes_t avail; /* number of frames available */ snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */ snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */ snd_pcm_state_t suspended_state; /* suspended stream state */ __u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */ s64 audio_tstamp_sec; /* sample counter, wall clock, PHC or on-demand sync'ed */ s64 audio_tstamp_nsec; s64 driver_tstamp_sec; /* useful in case reference system tstamp is reported with delay */ s64 driver_tstamp_nsec; __u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */ unsigned char reserved[52-4*sizeof(s64)]; /* must be filled with zero */ }; #define SNDRV_PCM_IOCTL_STATUS64 _IOR('A', 0x20, struct snd_pcm_status64) #define SNDRV_PCM_IOCTL_STATUS_EXT64 _IOWR('A', 0x24, struct snd_pcm_status64) struct snd_pcm_status32 { snd_pcm_state_t state; /* stream state */ s32 trigger_tstamp_sec; /* time when stream was started/stopped/paused */ s32 trigger_tstamp_nsec; s32 tstamp_sec; /* reference timestamp */ s32 tstamp_nsec; u32 appl_ptr; /* appl ptr */ u32 hw_ptr; /* hw ptr */ s32 delay; /* current delay in frames */ u32 avail; /* number of frames available */ u32 avail_max; /* max frames available on hw since last status */ u32 overrange; /* count of ADC (capture) overrange detections from last status */ snd_pcm_state_t suspended_state; /* suspended stream state */ u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */ s32 audio_tstamp_sec; /* sample counter, wall clock, PHC or on-demand sync'ed */ s32 audio_tstamp_nsec; s32 driver_tstamp_sec; /* useful in case reference system tstamp is reported with delay */ s32 driver_tstamp_nsec; u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */ unsigned char reserved[52-4*sizeof(s32)]; /* must be filled with zero */ }; #define SNDRV_PCM_IOCTL_STATUS32 _IOR('A', 0x20, struct snd_pcm_status32) #define SNDRV_PCM_IOCTL_STATUS_EXT32 _IOWR('A', 0x24, struct snd_pcm_status32) #endif /* __SOUND_PCM_H */ |
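/*
 * Illustrative sketch, not part of <sound/pcm.h>: a minimal example of how a
 * driver might use the accessors declared above (params_rate(),
 * params_channels(), params_buffer_bytes(), snd_pcm_lib_period_bytes(),
 * snd_pcm_substream_chip()).  The names "foo_chip", "foo_pcm_hw_params" and
 * "foo_pcm_prepare" are hypothetical and exist only for this example.
 */
#include <sound/pcm.h>

struct foo_chip {
	unsigned int rate;	/* negotiated sample rate in Hz */
	unsigned int channels;	/* negotiated channel count */
	size_t buffer_bytes;	/* ring buffer size in bytes */
	size_t period_bytes;	/* period size in bytes */
};

static int foo_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct foo_chip *chip = snd_pcm_substream_chip(substream);

	/* Cache the configuration negotiated in hw_params for later use. */
	chip->rate = params_rate(params);
	chip->channels = params_channels(params);
	chip->buffer_bytes = params_buffer_bytes(params);
	return 0;
}

static int foo_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct foo_chip *chip = snd_pcm_substream_chip(substream);

	/* The runtime period size is valid once hw_params has completed. */
	chip->period_bytes = snd_pcm_lib_period_bytes(substream);
	return 0;
}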
// SPDX-License-Identifier: GPL-2.0-or-later /* SCTP kernel implementation * (C) Copyright IBM Corp. 2002, 2004 * Copyright (c) 2001 Nokia, Inc. * Copyright (c) 2001 La Monte H.P. Yarroll * Copyright (c) 2002-2003 Intel Corp. * * This file is part of the SCTP kernel implementation * * SCTP over IPv6. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <linux-sctp@vger.kernel.org> * * Written or modified by: * Le Yanqun <yanqun.le@nokia.com> * Hui Huang <hui.huang@nokia.com> * La Monte H.P. 
Yarroll <piggy@acm.org> * Sridhar Samudrala <sri@us.ibm.com> * Jon Grimm <jgrimm@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * * Based on: * linux/net/ipv6/tcp_ipv6.c */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/ipsec.h> #include <linux/slab.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/random.h> #include <linux/seq_file.h> #include <net/protocol.h> #include <net/ndisc.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/transp_v6.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <net/inet_common.h> #include <net/inet_ecn.h> #include <net/sctp/sctp.h> #include <net/udp_tunnel.h> #include <linux/uaccess.h> static inline int sctp_v6_addr_match_len(union sctp_addr *s1, union sctp_addr *s2); static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, __be16 port); static int sctp_v6_cmp_addr(const union sctp_addr *addr1, const union sctp_addr *addr2); /* Event handler for inet6 address addition/deletion events. * The sctp_local_addr_list needs to be protocted by a spin lock since * multiple notifiers (say IPv4 and IPv6) may be running at the same * time and thus corrupt the list. * The reader side is protected with RCU. */ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, void *ptr) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; struct sctp_sockaddr_entry *addr = NULL; struct sctp_sockaddr_entry *temp; struct net *net = dev_net(ifa->idev->dev); int found = 0; switch (ev) { case NETDEV_UP: addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_addr = ifa->addr; addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; addr->valid = 1; spin_lock_bh(&net->sctp.local_addr_lock); list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list); sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW); spin_unlock_bh(&net->sctp.local_addr_lock); } break; case NETDEV_DOWN: spin_lock_bh(&net->sctp.local_addr_lock); list_for_each_entry_safe(addr, temp, &net->sctp.local_addr_list, list) { if (addr->a.sa.sa_family == AF_INET6 && ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr) && addr->a.v6.sin6_scope_id == ifa->idev->dev->ifindex) { found = 1; addr->valid = 0; list_del_rcu(&addr->list); sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL); break; } } spin_unlock_bh(&net->sctp.local_addr_lock); if (found) kfree_rcu(addr, rcu); break; } return NOTIFY_DONE; } static struct notifier_block sctp_inet6addr_notifier = { .notifier_call = sctp_inet6addr_event, }; static void sctp_v6_err_handle(struct sctp_transport *t, struct sk_buff *skb, __u8 type, __u8 code, __u32 info) { struct sctp_association *asoc = t->asoc; struct sock *sk = asoc->base.sk; int err = 0; switch (type) { case ICMPV6_PKT_TOOBIG: if (ip6_sk_accept_pmtu(sk)) sctp_icmp_frag_needed(sk, asoc, t, info); return; case ICMPV6_PARAMPROB: if (ICMPV6_UNK_NEXTHDR == code) { sctp_icmp_proto_unreachable(sk, asoc, t); return; } break; case NDISC_REDIRECT: sctp_icmp_redirect(sk, t, skb); return; default: break; } icmpv6_err_convert(type, code, &err); if (!sock_owned_by_user(sk) && inet6_test_bit(RECVERR6, sk)) { sk->sk_err = err; sk_error_report(sk); } else { WRITE_ONCE(sk->sk_err_soft, err); } } /* ICMP error handler. 
*/ static int sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { struct net *net = dev_net(skb->dev); struct sctp_transport *transport; struct sctp_association *asoc; __u16 saveip, savesctp; struct sock *sk; /* Fix up skb to look at the embedded net header. */ saveip = skb->network_header; savesctp = skb->transport_header; skb_reset_network_header(skb); skb_set_transport_header(skb, offset); sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); /* Put back, the original pointers. */ skb->network_header = saveip; skb->transport_header = savesctp; if (!sk) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return -ENOENT; } sctp_v6_err_handle(transport, skb, type, code, ntohl(info)); sctp_err_finish(sk, transport); return 0; } int sctp_udp_v6_err(struct sock *sk, struct sk_buff *skb) { struct net *net = dev_net(skb->dev); struct sctp_association *asoc; struct sctp_transport *t; struct icmp6hdr *hdr; __u32 info = 0; skb->transport_header += sizeof(struct udphdr); sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &t); if (!sk) { __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return -ENOENT; } skb->transport_header -= sizeof(struct udphdr); hdr = (struct icmp6hdr *)(skb_network_header(skb) - sizeof(struct icmp6hdr)); if (hdr->icmp6_type == NDISC_REDIRECT) { /* can't be handled without outer ip6hdr known, leave it to udpv6_err */ sctp_err_finish(sk, t); return 0; } if (hdr->icmp6_type == ICMPV6_PKT_TOOBIG) info = ntohl(hdr->icmp6_mtu); sctp_v6_err_handle(t, skb, hdr->icmp6_type, hdr->icmp6_code, info); sctp_err_finish(sk, t); return 1; } static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *t) { struct dst_entry *dst = dst_clone(t->dst); struct flowi6 *fl6 = &t->fl.u.ip6; struct sock *sk = skb->sk; struct ipv6_pinfo *np = inet6_sk(sk); __u8 tclass = np->tclass; __be32 label; pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, skb->len, &fl6->saddr, &fl6->daddr); if (t->dscp & SCTP_DSCP_SET_MASK) tclass = t->dscp & SCTP_DSCP_VAL_MASK; if (INET_ECN_is_capable(tclass)) IP6_ECN_flow_xmit(sk, fl6->flowlabel); if (!(t->param_flags & SPP_PMTUD_ENABLE)) skb->ignore_df = 1; SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); if (!t->encap_port || !sctp_sk(sk)->udp_port) { int res; skb_dst_set(skb, dst); rcu_read_lock(); res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt), tclass, READ_ONCE(sk->sk_priority)); rcu_read_unlock(); return res; } if (skb_is_gso(skb)) skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; skb->encapsulation = 1; skb_reset_inner_mac_header(skb); skb_reset_inner_transport_header(skb); skb_set_inner_ipproto(skb, IPPROTO_SCTP); label = ip6_make_flowlabel(sock_net(sk), skb, fl6->flowlabel, true, fl6); udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr, &fl6->daddr, tclass, ip6_dst_hoplimit(dst), label, sctp_sk(sk)->udp_port, t->encap_port, false, 0); return 0; } /* Returns the dst cache entry for the given source and destination ip * addresses. 
*/ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, struct flowi *fl, struct sock *sk) { struct sctp_association *asoc = t->asoc; struct dst_entry *dst = NULL; struct flowi _fl; struct flowi6 *fl6 = &_fl.u.ip6; struct sctp_bind_addr *bp; struct ipv6_pinfo *np = inet6_sk(sk); struct sctp_sockaddr_entry *laddr; union sctp_addr *daddr = &t->ipaddr; union sctp_addr dst_saddr; struct in6_addr *final_p, final; enum sctp_scope scope; __u8 matchlen = 0; memset(&_fl, 0, sizeof(_fl)); fl6->daddr = daddr->v6.sin6_addr; fl6->fl6_dport = daddr->v6.sin6_port; fl6->flowi6_proto = IPPROTO_SCTP; if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) fl6->flowi6_oif = daddr->v6.sin6_scope_id; else if (asoc) fl6->flowi6_oif = asoc->base.sk->sk_bound_dev_if; if (t->flowlabel & SCTP_FLOWLABEL_SET_MASK) fl6->flowlabel = htonl(t->flowlabel & SCTP_FLOWLABEL_VAL_MASK); if (inet6_test_bit(SNDFLOW, sk) && (fl6->flowlabel & IPV6_FLOWLABEL_MASK)) { struct ip6_flowlabel *flowlabel; flowlabel = fl6_sock_lookup(sk, fl6->flowlabel); if (IS_ERR(flowlabel)) goto out; fl6_sock_release(flowlabel); } pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr); if (asoc) fl6->fl6_sport = htons(asoc->base.bind_addr.port); if (saddr) { fl6->saddr = saddr->v6.sin6_addr; if (!fl6->fl6_sport) fl6->fl6_sport = saddr->v6.sin6_port; pr_debug("src=%pI6 - ", &fl6->saddr); } rcu_read_lock(); final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); rcu_read_unlock(); dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (!asoc || saddr) { t->dst = dst; memcpy(fl, &_fl, sizeof(_fl)); goto out; } bp = &asoc->base.bind_addr; scope = sctp_scope(daddr); /* ip6_dst_lookup has filled in the fl6->saddr for us. Check * to see if we can use it. */ if (!IS_ERR(dst)) { /* Walk through the bind address list and look for a bind * address that matches the source address of the returned dst. */ sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port)); rcu_read_lock(); list_for_each_entry_rcu(laddr, &bp->address_list, list) { if (!laddr->valid || laddr->state == SCTP_ADDR_DEL || (laddr->state != SCTP_ADDR_SRC && !asoc->src_out_of_asoc_ok)) continue; /* Do not compare against v4 addrs */ if ((laddr->a.sa.sa_family == AF_INET6) && (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) { rcu_read_unlock(); t->dst = dst; memcpy(fl, &_fl, sizeof(_fl)); goto out; } } rcu_read_unlock(); /* None of the bound addresses match the source address of the * dst. So release it. */ dst_release(dst); dst = NULL; } /* Walk through the bind address list and try to get the * best source address for a given destination. 
*/ rcu_read_lock(); list_for_each_entry_rcu(laddr, &bp->address_list, list) { struct dst_entry *bdst; __u8 bmatchlen; if (!laddr->valid || laddr->state != SCTP_ADDR_SRC || laddr->a.sa.sa_family != AF_INET6 || scope > sctp_scope(&laddr->a)) continue; fl6->saddr = laddr->a.v6.sin6_addr; fl6->fl6_sport = laddr->a.v6.sin6_port; final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); bdst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); if (IS_ERR(bdst)) continue; if (ipv6_chk_addr(dev_net(bdst->dev), &laddr->a.v6.sin6_addr, bdst->dev, 1)) { if (!IS_ERR_OR_NULL(dst)) dst_release(dst); dst = bdst; t->dst = dst; memcpy(fl, &_fl, sizeof(_fl)); break; } bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); if (matchlen > bmatchlen) { dst_release(bdst); continue; } if (!IS_ERR_OR_NULL(dst)) dst_release(dst); dst = bdst; matchlen = bmatchlen; t->dst = dst; memcpy(fl, &_fl, sizeof(_fl)); } rcu_read_unlock(); out: if (!IS_ERR_OR_NULL(dst)) { struct rt6_info *rt; rt = dst_rt6_info(dst); t->dst_cookie = rt6_get_cookie(rt); pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n", &rt->rt6i_dst.addr, rt->rt6i_dst.plen, &fl->u.ip6.saddr); } else { t->dst = NULL; pr_debug("no route\n"); } } /* Returns the number of consecutive initial bits that match in the 2 ipv6 * addresses. */ static inline int sctp_v6_addr_match_len(union sctp_addr *s1, union sctp_addr *s2) { return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr); } /* Fills in the source address(saddr) based on the destination address(daddr) * and asoc's bind address list. */ static void sctp_v6_get_saddr(struct sctp_sock *sk, struct sctp_transport *t, struct flowi *fl) { struct flowi6 *fl6 = &fl->u.ip6; union sctp_addr *saddr = &t->saddr; pr_debug("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst); if (t->dst) { saddr->v6.sin6_family = AF_INET6; saddr->v6.sin6_addr = fl6->saddr; } } /* Make a copy of all potential local addresses. */ static void sctp_v6_copy_addrlist(struct list_head *addrlist, struct net_device *dev) { struct inet6_dev *in6_dev; struct inet6_ifaddr *ifp; struct sctp_sockaddr_entry *addr; rcu_read_lock(); if ((in6_dev = __in6_dev_get(dev)) == NULL) { rcu_read_unlock(); return; } read_lock_bh(&in6_dev->lock); list_for_each_entry(ifp, &in6_dev->addr_list, if_list) { /* Add the address to the local list. */ addr = kzalloc(sizeof(*addr), GFP_ATOMIC); if (addr) { addr->a.v6.sin6_family = AF_INET6; addr->a.v6.sin6_addr = ifp->addr; addr->a.v6.sin6_scope_id = dev->ifindex; addr->valid = 1; INIT_LIST_HEAD(&addr->list); list_add_tail(&addr->list, addrlist); } } read_unlock_bh(&in6_dev->lock); rcu_read_unlock(); } /* Copy over any ip options */ static void sctp_v6_copy_ip_options(struct sock *sk, struct sock *newsk) { struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct ipv6_txoptions *opt; newnp = inet6_sk(newsk); rcu_read_lock(); opt = rcu_dereference(np->opt); if (opt) { opt = ipv6_dup_options(newsk, opt); if (!opt) pr_err("%s: Failed to copy ip options\n", __func__); } RCU_INIT_POINTER(newnp->opt, opt); rcu_read_unlock(); } /* Account for the IP options */ static int sctp_v6_ip_options_len(struct sock *sk) { struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_txoptions *opt; int len = 0; rcu_read_lock(); opt = rcu_dereference(np->opt); if (opt) len = opt->opt_flen + opt->opt_nflen; rcu_read_unlock(); return len; } /* Initialize a sockaddr_storage from in incoming skb. 
*/ static void sctp_v6_from_skb(union sctp_addr *addr, struct sk_buff *skb, int is_saddr) { /* Always called on head skb, so this is safe */ struct sctphdr *sh = sctp_hdr(skb); struct sockaddr_in6 *sa = &addr->v6; addr->v6.sin6_family = AF_INET6; addr->v6.sin6_flowinfo = 0; /* FIXME */ addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif; if (is_saddr) { sa->sin6_port = sh->source; sa->sin6_addr = ipv6_hdr(skb)->saddr; } else { sa->sin6_port = sh->dest; sa->sin6_addr = ipv6_hdr(skb)->daddr; } } /* Initialize an sctp_addr from a socket. */ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) { addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = 0; addr->v6.sin6_addr = sk->sk_v6_rcv_saddr; } /* Initialize sk->sk_rcv_saddr from sctp_addr. */ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk) { if (addr->sa.sa_family == AF_INET) { sk->sk_v6_rcv_saddr.s6_addr32[0] = 0; sk->sk_v6_rcv_saddr.s6_addr32[1] = 0; sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff); sk->sk_v6_rcv_saddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; } else { sk->sk_v6_rcv_saddr = addr->v6.sin6_addr; } } /* Initialize sk->sk_daddr from sctp_addr. */ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) { if (addr->sa.sa_family == AF_INET) { sk->sk_v6_daddr.s6_addr32[0] = 0; sk->sk_v6_daddr.s6_addr32[1] = 0; sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff); sk->sk_v6_daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; } else { sk->sk_v6_daddr = addr->v6.sin6_addr; } } /* Initialize a sctp_addr from an address parameter. */ static bool sctp_v6_from_addr_param(union sctp_addr *addr, union sctp_addr_param *param, __be16 port, int iif) { if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param)) return false; addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = port; addr->v6.sin6_flowinfo = 0; /* BUG */ addr->v6.sin6_addr = param->v6.addr; addr->v6.sin6_scope_id = iif; return true; } /* Initialize an address parameter from a sctp_addr and return the length * of the address parameter. */ static int sctp_v6_to_addr_param(const union sctp_addr *addr, union sctp_addr_param *param) { int length = sizeof(struct sctp_ipv6addr_param); param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS; param->v6.param_hdr.length = htons(length); param->v6.addr = addr->v6.sin6_addr; return length; } /* Initialize a sctp_addr from struct in6_addr. */ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, __be16 port) { addr->sa.sa_family = AF_INET6; addr->v6.sin6_port = port; addr->v6.sin6_flowinfo = 0; addr->v6.sin6_addr = *saddr; addr->v6.sin6_scope_id = 0; } static int __sctp_v6_cmp_addr(const union sctp_addr *addr1, const union sctp_addr *addr2) { if (addr1->sa.sa_family != addr2->sa.sa_family) { if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET6 && ipv6_addr_v4mapped(&addr2->v6.sin6_addr) && addr2->v6.sin6_addr.s6_addr32[3] == addr1->v4.sin_addr.s_addr) return 1; if (addr2->sa.sa_family == AF_INET && addr1->sa.sa_family == AF_INET6 && ipv6_addr_v4mapped(&addr1->v6.sin6_addr) && addr1->v6.sin6_addr.s6_addr32[3] == addr2->v4.sin_addr.s_addr) return 1; return 0; } if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) return 0; /* If this is a linklocal address, compare the scope_id. */ if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) && addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id) return 0; return 1; } /* Compare addresses exactly. 
* v4-mapped-v6 is also in consideration. */ static int sctp_v6_cmp_addr(const union sctp_addr *addr1, const union sctp_addr *addr2) { return __sctp_v6_cmp_addr(addr1, addr2) && addr1->v6.sin6_port == addr2->v6.sin6_port; } /* Initialize addr struct to INADDR_ANY. */ static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) { memset(addr, 0x00, sizeof(union sctp_addr)); addr->v6.sin6_family = AF_INET6; addr->v6.sin6_port = port; } /* Is this a wildcard address? */ static int sctp_v6_is_any(const union sctp_addr *addr) { return ipv6_addr_any(&addr->v6.sin6_addr); } /* Should this be available for binding? */ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) { const struct in6_addr *in6 = (const struct in6_addr *)&addr->v6.sin6_addr; struct sock *sk = &sp->inet.sk; struct net *net = sock_net(sk); struct net_device *dev = NULL; int type, res, bound_dev_if; type = ipv6_addr_type(in6); if (IPV6_ADDR_ANY == type) return 1; if (type == IPV6_ADDR_MAPPED) { if (sp && ipv6_only_sock(sctp_opt2sk(sp))) return 0; sctp_v6_map_v4(addr); return sctp_get_af_specific(AF_INET)->available(addr, sp); } if (!(type & IPV6_ADDR_UNICAST)) return 0; rcu_read_lock(); bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); if (bound_dev_if) { res = 0; dev = dev_get_by_index_rcu(net, bound_dev_if); if (!dev) goto out; } res = ipv6_can_nonlocal_bind(net, &sp->inet) || ipv6_chk_addr(net, in6, dev, 0); out: rcu_read_unlock(); return res; } /* This function checks if the address is a valid address to be used for * SCTP. * * Output: * Return 0 - If the address is a non-unicast or an illegal address. * Return 1 - If the address is a unicast. */ static int sctp_v6_addr_valid(union sctp_addr *addr, struct sctp_sock *sp, const struct sk_buff *skb) { int ret = ipv6_addr_type(&addr->v6.sin6_addr); /* Support v4-mapped-v6 address. */ if (ret == IPV6_ADDR_MAPPED) { /* Note: This routine is used in input, so v4-mapped-v6 * are disallowed here when there is no sctp_sock. */ if (sp && ipv6_only_sock(sctp_opt2sk(sp))) return 0; sctp_v6_map_v4(addr); return sctp_get_af_specific(AF_INET)->addr_valid(addr, sp, skb); } /* Is this a non-unicast address */ if (!(ret & IPV6_ADDR_UNICAST)) return 0; return 1; } /* What is the scope of 'addr'? */ static enum sctp_scope sctp_v6_scope(union sctp_addr *addr) { enum sctp_scope retval; int v6scope; /* The IPv6 scope is really a set of bit fields. * See IFA_* in <net/if_inet6.h>. Map to a generic SCTP scope. */ v6scope = ipv6_addr_scope(&addr->v6.sin6_addr); switch (v6scope) { case IFA_HOST: retval = SCTP_SCOPE_LOOPBACK; break; case IFA_LINK: retval = SCTP_SCOPE_LINK; break; case IFA_SITE: retval = SCTP_SCOPE_PRIVATE; break; default: retval = SCTP_SCOPE_GLOBAL; break; } return retval; } /* Create and initialize a new sk for the socket to be returned by accept(). 
*/ static struct sock *sctp_v6_create_accept_sk(struct sock *sk, struct sctp_association *asoc, bool kern) { struct sock *newsk; struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct sctp6_sock *newsctp6sk; newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot, kern); if (!newsk) goto out; sock_init_data(NULL, newsk); sctp_copy_sock(newsk, sk, asoc); sock_reset_flag(sk, SOCK_ZAPPED); newsctp6sk = (struct sctp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newsctp6sk->inet6; sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped; newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newnp->ipv6_mc_list = NULL; newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; sctp_v6_copy_ip_options(sk, newsk); /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() * and getpeername(). */ sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk); newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr; if (newsk->sk_prot->init(newsk)) { sk_common_release(newsk); newsk = NULL; } out: return newsk; } /* Format a sockaddr for return to user space. This makes sure the return is * AF_INET or AF_INET6 depending on the SCTP_I_WANT_MAPPED_V4_ADDR option. */ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) { if (sp->v4mapped) { if (addr->sa.sa_family == AF_INET) sctp_v4_map_v6(addr); } else { if (addr->sa.sa_family == AF_INET6 && ipv6_addr_v4mapped(&addr->v6.sin6_addr)) sctp_v6_map_v4(addr); } if (addr->sa.sa_family == AF_INET) { memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); return sizeof(struct sockaddr_in); } return sizeof(struct sockaddr_in6); } /* Where did this skb come from? */ static int sctp_v6_skb_iif(const struct sk_buff *skb) { return inet6_iif(skb); } static int sctp_v6_skb_sdif(const struct sk_buff *skb) { return inet6_sdif(skb); } /* Was this packet marked by Explicit Congestion Notification? */ static int sctp_v6_is_ce(const struct sk_buff *skb) { return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20); } /* Dump the v6 addr to the seq file. */ static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr) { seq_printf(seq, "%pI6 ", &addr->v6.sin6_addr); } static void sctp_v6_ecn_capable(struct sock *sk) { inet6_sk(sk)->tclass |= INET_ECN_ECT_0; } /* Initialize a PF_INET msgname from a ulpevent. */ static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, char *msgname, int *addrlen) { union sctp_addr *addr; struct sctp_association *asoc; union sctp_addr *paddr; if (!msgname) return; addr = (union sctp_addr *)msgname; asoc = event->asoc; paddr = &asoc->peer.primary_addr; if (paddr->sa.sa_family == AF_INET) { addr->v4.sin_family = AF_INET; addr->v4.sin_port = htons(asoc->peer.port); addr->v4.sin_addr = paddr->v4.sin_addr; } else { addr->v6.sin6_family = AF_INET6; addr->v6.sin6_flowinfo = 0; if (ipv6_addr_type(&paddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) addr->v6.sin6_scope_id = paddr->v6.sin6_scope_id; else addr->v6.sin6_scope_id = 0; addr->v6.sin6_port = htons(asoc->peer.port); addr->v6.sin6_addr = paddr->v6.sin6_addr; } *addrlen = sctp_v6_addr_to_user(sctp_sk(asoc->base.sk), addr); } /* Initialize a msg_name from an inbound skb. 
*/ static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, int *addr_len) { union sctp_addr *addr; struct sctphdr *sh; if (!msgname) return; addr = (union sctp_addr *)msgname; sh = sctp_hdr(skb); if (ip_hdr(skb)->version == 4) { addr->v4.sin_family = AF_INET; addr->v4.sin_port = sh->source; addr->v4.sin_addr.s_addr = ip_hdr(skb)->saddr; } else { addr->v6.sin6_family = AF_INET6; addr->v6.sin6_flowinfo = 0; addr->v6.sin6_port = sh->source; addr->v6.sin6_addr = ipv6_hdr(skb)->saddr; if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) addr->v6.sin6_scope_id = sctp_v6_skb_iif(skb); else addr->v6.sin6_scope_id = 0; } *addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr); } /* Do we support this AF? */ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp) { switch (family) { case AF_INET6: return 1; /* v4-mapped-v6 addresses */ case AF_INET: if (!ipv6_only_sock(sctp_opt2sk(sp))) return 1; fallthrough; default: return 0; } } /* Address matching with wildcards allowed. This extra level * of indirection lets us choose whether a PF_INET6 should * disallow any v4 addresses if we so choose. */ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, const union sctp_addr *addr2, struct sctp_sock *opt) { struct sock *sk = sctp_opt2sk(opt); struct sctp_af *af1, *af2; af1 = sctp_get_af_specific(addr1->sa.sa_family); af2 = sctp_get_af_specific(addr2->sa.sa_family); if (!af1 || !af2) return 0; /* If the socket is IPv6 only, v4 addrs will not match */ if (ipv6_only_sock(sk) && af1 != af2) return 0; /* Today, wildcard AF_INET/AF_INET6. */ if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) return 1; if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET) return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr; return __sctp_v6_cmp_addr(addr1, addr2); } /* Verify that the provided sockaddr looks bindable. Common verification, * has already been taken care of. */ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) { struct sctp_af *af; /* ASSERT: address family has already been verified. */ if (addr->sa.sa_family != AF_INET6) af = sctp_get_af_specific(addr->sa.sa_family); else { int type = ipv6_addr_type(&addr->v6.sin6_addr); struct net_device *dev; if (type & IPV6_ADDR_LINKLOCAL) { struct net *net; if (!addr->v6.sin6_scope_id) return 0; net = sock_net(&opt->inet.sk); rcu_read_lock(); dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) || ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0))) { rcu_read_unlock(); return 0; } rcu_read_unlock(); } af = opt->pf->af; } return af->available(addr, opt); } /* Verify that the provided sockaddr looks sendable. Common verification, * has already been taken care of. */ static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) { struct sctp_af *af = NULL; /* ASSERT: address family has already been verified. */ if (addr->sa.sa_family != AF_INET6) af = sctp_get_af_specific(addr->sa.sa_family); else { int type = ipv6_addr_type(&addr->v6.sin6_addr); struct net_device *dev; if (type & IPV6_ADDR_LINKLOCAL) { if (!addr->v6.sin6_scope_id) return 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk), addr->v6.sin6_scope_id); rcu_read_unlock(); if (!dev) return 0; } af = opt->pf->af; } return af != NULL; } /* Fill in Supported Address Type information for INIT and INIT-ACK * chunks. 
Note: In the future, we may want to look at sock options * to determine whether a PF_INET6 socket really wants to have IPV4 * addresses. * Returns number of addresses supported. */ static int sctp_inet6_supported_addrs(const struct sctp_sock *opt, __be16 *types) { types[0] = SCTP_PARAM_IPV6_ADDRESS; if (!opt || !ipv6_only_sock(sctp_opt2sk(opt))) { types[1] = SCTP_PARAM_IPV4_ADDRESS; return 2; } return 1; } /* Handle SCTP_I_WANT_MAPPED_V4_ADDR for getpeername() and getsockname() */ static int sctp_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { int rc; rc = inet6_getname(sock, uaddr, peer); if (rc < 0) return rc; rc = sctp_v6_addr_to_user(sctp_sk(sock->sk), (union sctp_addr *)uaddr); return rc; } static const struct proto_ops inet6_seqpacket_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, .connect = sctp_inet_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = sctp_getname, .poll = sctp_poll, .ioctl = inet6_ioctl, .gettstamp = sock_gettstamp, .listen = sctp_inet_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = inet_recvmsg, .mmap = sock_no_mmap, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, #endif }; static struct inet_protosw sctpv6_seqpacket_protosw = { .type = SOCK_SEQPACKET, .protocol = IPPROTO_SCTP, .prot = &sctpv6_prot, .ops = &inet6_seqpacket_ops, .flags = SCTP_PROTOSW_FLAG }; static struct inet_protosw sctpv6_stream_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_SCTP, .prot = &sctpv6_prot, .ops = &inet6_seqpacket_ops, .flags = SCTP_PROTOSW_FLAG, }; static int sctp6_rcv(struct sk_buff *skb) { SCTP_INPUT_CB(skb)->encap_port = 0; return sctp_rcv(skb) ? -1 : 0; } static const struct inet6_protocol sctpv6_protocol = { .handler = sctp6_rcv, .err_handler = sctp_v6_err, .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, }; static struct sctp_af sctp_af_inet6 = { .sa_family = AF_INET6, .sctp_xmit = sctp_v6_xmit, .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .get_dst = sctp_v6_get_dst, .get_saddr = sctp_v6_get_saddr, .copy_addrlist = sctp_v6_copy_addrlist, .from_skb = sctp_v6_from_skb, .from_sk = sctp_v6_from_sk, .from_addr_param = sctp_v6_from_addr_param, .to_addr_param = sctp_v6_to_addr_param, .cmp_addr = sctp_v6_cmp_addr, .scope = sctp_v6_scope, .addr_valid = sctp_v6_addr_valid, .inaddr_any = sctp_v6_inaddr_any, .is_any = sctp_v6_is_any, .available = sctp_v6_available, .skb_iif = sctp_v6_skb_iif, .skb_sdif = sctp_v6_skb_sdif, .is_ce = sctp_v6_is_ce, .seq_dump_addr = sctp_v6_seq_dump_addr, .ecn_capable = sctp_v6_ecn_capable, .net_header_len = sizeof(struct ipv6hdr), .sockaddr_len = sizeof(struct sockaddr_in6), .ip_options_len = sctp_v6_ip_options_len, }; static struct sctp_pf sctp_pf_inet6 = { .event_msgname = sctp_inet6_event_msgname, .skb_msgname = sctp_inet6_skb_msgname, .af_supported = sctp_inet6_af_supported, .cmp_addr = sctp_inet6_cmp_addr, .bind_verify = sctp_inet6_bind_verify, .send_verify = sctp_inet6_send_verify, .supported_addrs = sctp_inet6_supported_addrs, .create_accept_sk = sctp_v6_create_accept_sk, .addr_to_user = sctp_v6_addr_to_user, .to_sk_saddr = sctp_v6_to_sk_saddr, .to_sk_daddr = sctp_v6_to_sk_daddr, .copy_ip_options = sctp_v6_copy_ip_options, .af = &sctp_af_inet6, }; /* Initialize IPv6 support and register with socket layer. */ void sctp_v6_pf_init(void) { /* Register the SCTP specific PF_INET6 functions. 
*/ sctp_register_pf(&sctp_pf_inet6, PF_INET6); /* Register the SCTP specific AF_INET6 functions. */ sctp_register_af(&sctp_af_inet6); } void sctp_v6_pf_exit(void) { list_del(&sctp_af_inet6.list); } /* Initialize IPv6 support and register with socket layer. */ int sctp_v6_protosw_init(void) { int rc; rc = proto_register(&sctpv6_prot, 1); if (rc) return rc; /* Add SCTPv6(UDP and TCP style) to inetsw6 linked list. */ inet6_register_protosw(&sctpv6_seqpacket_protosw); inet6_register_protosw(&sctpv6_stream_protosw); return 0; } void sctp_v6_protosw_exit(void) { inet6_unregister_protosw(&sctpv6_seqpacket_protosw); inet6_unregister_protosw(&sctpv6_stream_protosw); proto_unregister(&sctpv6_prot); } /* Register with inet6 layer. */ int sctp_v6_add_protocol(void) { /* Register notifier for inet6 address additions/deletions. */ register_inet6addr_notifier(&sctp_inet6addr_notifier); if (inet6_add_protocol(&sctpv6_protocol, IPPROTO_SCTP) < 0) return -EAGAIN; return 0; } /* Unregister with inet6 layer. */ void sctp_v6_del_protocol(void) { inet6_del_protocol(&sctpv6_protocol, IPPROTO_SCTP); unregister_inet6addr_notifier(&sctp_inet6addr_notifier); } |
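The comparison helpers above treat an AF_INET address and an AF_INET6 address as equal when the IPv6 side is a v4-mapped address (::ffff:a.b.c.d) whose embedded IPv4 bits match the IPv4 side. A minimal user-space sketch of that rule follows; it is an illustration only, and the helper name addrs_match_v4mapped is an assumption, not part of the kernel code above.

/*
 * Illustration of the v4-mapped-v6 comparison rule used by the SCTP
 * address helpers: an IPv4 and an IPv6 sockaddr match when the IPv6
 * address is v4-mapped and its low 32 bits equal the IPv4 address.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static int addrs_match_v4mapped(const struct sockaddr_in *a4,
				const struct sockaddr_in6 *a6)
{
	/* Only a v4-mapped IPv6 address can match an IPv4 address. */
	if (!IN6_IS_ADDR_V4MAPPED(&a6->sin6_addr))
		return 0;

	/* The embedded IPv4 address lives in the last 4 bytes. */
	return memcmp(&a6->sin6_addr.s6_addr[12],
		      &a4->sin_addr.s_addr, 4) == 0;
}

int main(void)
{
	struct sockaddr_in a4 = { .sin_family = AF_INET };
	struct sockaddr_in6 a6 = { .sin6_family = AF_INET6 };

	inet_pton(AF_INET, "192.0.2.1", &a4.sin_addr);
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &a6.sin6_addr);

	printf("match: %d\n", addrs_match_v4mapped(&a4, &a6)); /* prints 1 */
	return 0;
}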
| 6 6 2 2 2 2 3 3 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 | // SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2007 Andi Kleen, SUSE Labs. * * This contains most of the x86 vDSO kernel-side code. */ #include <linux/mm.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/sched/task_stack.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/random.h> #include <linux/elf.h> #include <linux/cpu.h> #include <linux/ptrace.h> #include <linux/vdso_datastore.h> #include <asm/pvclock.h> #include <asm/vgtod.h> #include <asm/proto.h> #include <asm/vdso.h> #include <asm/tlb.h> #include <asm/page.h> #include <asm/desc.h> #include <asm/cpufeature.h> #include <asm/vdso/vsyscall.h> #include <clocksource/hyperv_timer.h> static_assert(VDSO_NR_PAGES + VDSO_NR_VCLOCK_PAGES == __VDSO_PAGES); unsigned int vclocks_used __read_mostly; #if defined(CONFIG_X86_64) unsigned int __read_mostly vdso64_enabled = 1; #endif int __init init_vdso_image(const struct vdso_image *image) { BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32); BUG_ON(image->size % PAGE_SIZE != 0); apply_alternatives((struct alt_instr *)(image->data + image->alt), (struct alt_instr *)(image->data + image->alt + image->alt_len)); return 0; } struct linux_binprm; static vm_fault_t vdso_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { const struct vdso_image *image = vma->vm_mm->context.vdso_image; if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size) return VM_FAULT_SIGBUS; vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT)); get_page(vmf->page); return 0; } static void vdso_fix_landing(const struct vdso_image *image, struct vm_area_struct *new_vma) { if (in_ia32_syscall() && image == &vdso_image_32) { struct pt_regs *regs = current_pt_regs(); unsigned long vdso_land = image->sym_int80_landing_pad; unsigned long old_land_addr = vdso_land + (unsigned long)current->mm->context.vdso; /* Fixing userspace landing - look at do_fast_syscall_32 */ if (regs->ip == old_land_addr) regs->ip = new_vma->vm_start + vdso_land; } } static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma) { const struct vdso_image *image = current->mm->context.vdso_image; vdso_fix_landing(image, new_vma); current->mm->context.vdso = (void __user *)new_vma->vm_start; return 0; } static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm, struct vm_area_struct *vma, struct vm_fault *vmf) { switch (vmf->pgoff) { #ifdef CONFIG_PARAVIRT_CLOCK case VDSO_PAGE_PVCLOCK_OFFSET: { struct 
pvclock_vsyscall_time_info *pvti = pvclock_get_pvti_cpu0_va(); if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) return vmf_insert_pfn_prot(vma, vmf->address, __pa(pvti) >> PAGE_SHIFT, pgprot_decrypted(vma->vm_page_prot)); break; } #endif /* CONFIG_PARAVIRT_CLOCK */ #ifdef CONFIG_HYPERV_TIMER case VDSO_PAGE_HVCLOCK_OFFSET: { unsigned long pfn = hv_get_tsc_pfn(); if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK)) return vmf_insert_pfn(vma, vmf->address, pfn); break; } #endif /* CONFIG_HYPERV_TIMER */ } return VM_FAULT_SIGBUS; } static const struct vm_special_mapping vdso_mapping = { .name = "[vdso]", .fault = vdso_fault, .mremap = vdso_mremap, }; static const struct vm_special_mapping vvar_vclock_mapping = { .name = "[vvar_vclock]", .fault = vvar_vclock_fault, }; /* * Add vdso and vvar mappings to current process. * @image - blob to map * @addr - request a specific address (zero to map at free addr) */ static int map_vdso(const struct vdso_image *image, unsigned long addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long text_start; int ret = 0; if (mmap_write_lock_killable(mm)) return -EINTR; addr = get_unmapped_area(NULL, addr, image->size + __VDSO_PAGES * PAGE_SIZE, 0, 0); if (IS_ERR_VALUE(addr)) { ret = addr; goto up_fail; } text_start = addr + __VDSO_PAGES * PAGE_SIZE; /* * MAYWRITE to allow gdb to COW and set breakpoints */ vma = _install_special_mapping(mm, text_start, image->size, VM_READ|VM_EXEC| VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| VM_SEALED_SYSMAP, &vdso_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto up_fail; } vma = vdso_install_vvar_mapping(mm, addr); if (IS_ERR(vma)) { ret = PTR_ERR(vma); do_munmap(mm, text_start, image->size, NULL); goto up_fail; } vma = _install_special_mapping(mm, VDSO_VCLOCK_PAGES_START(addr), VDSO_NR_VCLOCK_PAGES * PAGE_SIZE, VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP| VM_PFNMAP|VM_SEALED_SYSMAP, &vvar_vclock_mapping); if (IS_ERR(vma)) { ret = PTR_ERR(vma); do_munmap(mm, text_start, image->size, NULL); do_munmap(mm, addr, image->size, NULL); goto up_fail; } current->mm->context.vdso = (void __user *)text_start; current->mm->context.vdso_image = image; up_fail: mmap_write_unlock(mm); return ret; } int map_vdso_once(const struct vdso_image *image, unsigned long addr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; VMA_ITERATOR(vmi, mm, 0); mmap_write_lock(mm); /* * Check if we have already mapped vdso blob - fail to prevent * abusing from userspace install_special_mapping, which may * not do accounting and rlimit right. * We could search vma near context.vdso, but it's a slowpath, * so let's explicitly check all VMAs to be completely sure. 
*/ for_each_vma(vmi, vma) { if (vma_is_special_mapping(vma, &vdso_mapping) || vma_is_special_mapping(vma, &vdso_vvar_mapping) || vma_is_special_mapping(vma, &vvar_vclock_mapping)) { mmap_write_unlock(mm); return -EEXIST; } } mmap_write_unlock(mm); return map_vdso(image, addr); } static int load_vdso32(void) { if (vdso32_enabled != 1) /* Other values all mean "disabled" */ return 0; return map_vdso(&vdso_image_32, 0); } int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { if (IS_ENABLED(CONFIG_X86_64)) { if (!vdso64_enabled) return 0; return map_vdso(&vdso_image_64, 0); } return load_vdso32(); } #ifdef CONFIG_COMPAT int compat_arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp, bool x32) { if (IS_ENABLED(CONFIG_X86_X32_ABI) && x32) { if (!vdso64_enabled) return 0; return map_vdso(&vdso_image_x32, 0); } if (IS_ENABLED(CONFIG_IA32_EMULATION)) return load_vdso32(); return 0; } #endif bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs) { const struct vdso_image *image = current->mm->context.vdso_image; unsigned long vdso = (unsigned long) current->mm->context.vdso; if (in_ia32_syscall() && image == &vdso_image_32) { if (regs->ip == vdso + image->sym_vdso32_sigreturn_landing_pad || regs->ip == vdso + image->sym_vdso32_rt_sigreturn_landing_pad) return true; } return false; } #ifdef CONFIG_X86_64 static __init int vdso_setup(char *s) { vdso64_enabled = simple_strtoul(s, NULL, 0); return 1; } __setup("vdso=", vdso_setup); #endif /* CONFIG_X86_64 */ |
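The address chosen by map_vdso() is handed to the new process through the AT_SYSINFO_EHDR auxiliary-vector entry, which is how the C library finds the vDSO at startup. A small user-space sketch (illustration only, not part of the kernel code above) that locates the process's own [vdso] mapping:

#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
	const unsigned char *p = (const unsigned char *)vdso;

	if (!vdso) {
		/* For example after booting with vdso=0, cf. vdso_setup(). */
		puts("no vDSO mapped");
		return 0;
	}

	/* The mapping starts with a regular ELF header: 0x7f 'E' 'L' 'F'. */
	printf("vDSO ELF image at %#lx, magic %02x %c%c%c\n",
	       vdso, p[0], p[1], p[2], p[3]);
	return 0;
}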
1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 | // SPDX-License-Identifier: GPL-2.0 /* * Zoned block device handling * * Copyright (c) 2015, Hannes Reinecke * Copyright (c) 2015, SUSE Linux GmbH * * Copyright (c) 2016, Damien Le Moal * Copyright (c) 2016, Western Digital * Copyright (c) 2024, Western Digital Corporation or its affiliates. */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/spinlock.h> #include <linux/refcount.h> #include <linux/mempool.h> #include <trace/events/block.h> #include "blk.h" #include "blk-mq-sched.h" #include "blk-mq-debugfs.h" #define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name static const char *const zone_cond_name[] = { ZONE_COND_NAME(NOT_WP), ZONE_COND_NAME(EMPTY), ZONE_COND_NAME(IMP_OPEN), ZONE_COND_NAME(EXP_OPEN), ZONE_COND_NAME(CLOSED), ZONE_COND_NAME(READONLY), ZONE_COND_NAME(FULL), ZONE_COND_NAME(OFFLINE), }; #undef ZONE_COND_NAME /* * Per-zone write plug. * @node: hlist_node structure for managing the plug using a hash table. * @ref: Zone write plug reference counter. A zone write plug reference is * always at least 1 when the plug is hashed in the disk plug hash table. * The reference is incremented whenever a new BIO needing plugging is * submitted and when a function needs to manipulate a plug. The * reference count is decremented whenever a plugged BIO completes and * when a function that referenced the plug returns. The initial * reference is dropped whenever the zone of the zone write plug is reset, * finished and when the zone becomes full (last write BIO to the zone * completes). * @lock: Spinlock to atomically manipulate the plug. * @flags: Flags indicating the plug state. * @zone_no: The number of the zone the plug is managing. * @wp_offset: The zone write pointer location relative to the start of the zone * as a number of 512B sectors. * @bio_list: The list of BIOs that are currently plugged. * @bio_work: Work struct to handle issuing of plugged BIOs * @rcu_head: RCU head to free zone write plugs with an RCU grace period. * @disk: The gendisk the plug belongs to. 
*/ struct blk_zone_wplug { struct hlist_node node; refcount_t ref; spinlock_t lock; unsigned int flags; unsigned int zone_no; unsigned int wp_offset; struct bio_list bio_list; struct work_struct bio_work; struct rcu_head rcu_head; struct gendisk *disk; }; /* * Zone write plug flags bits: * - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged, * that is, that write BIOs are being throttled due to a write BIO already * being executed or the zone write plug bio list is not empty. * - BLK_ZONE_WPLUG_NEED_WP_UPDATE: Indicates that we lost track of a zone * write pointer offset and need to update it. * - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed * from the disk hash table and that the initial reference to the zone * write plug set when the plug was first added to the hash table has been * dropped. This flag is set when a zone is reset, finished or become full, * to prevent new references to the zone write plug to be taken for * newly incoming BIOs. A zone write plug flagged with this flag will be * freed once all remaining references from BIOs or functions are dropped. */ #define BLK_ZONE_WPLUG_PLUGGED (1U << 0) #define BLK_ZONE_WPLUG_NEED_WP_UPDATE (1U << 1) #define BLK_ZONE_WPLUG_UNHASHED (1U << 2) /** * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX. * @zone_cond: BLK_ZONE_COND_XXX. * * Description: Centralize block layer function to convert BLK_ZONE_COND_XXX * into string format. Useful in the debugging and tracing zone conditions. For * invalid BLK_ZONE_COND_XXX it returns string "UNKNOWN". */ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond) { static const char *zone_cond_str = "UNKNOWN"; if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond]) zone_cond_str = zone_cond_name[zone_cond]; return zone_cond_str; } EXPORT_SYMBOL_GPL(blk_zone_cond_str); struct disk_report_zones_cb_args { struct gendisk *disk; report_zones_cb user_cb; void *user_data; }; static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk, struct blk_zone *zone); static int disk_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data) { struct disk_report_zones_cb_args *args = data; struct gendisk *disk = args->disk; if (disk->zone_wplugs_hash) disk_zone_wplug_sync_wp_offset(disk, zone); if (!args->user_cb) return 0; return args->user_cb(zone, idx, args->user_data); } /** * blkdev_report_zones - Get zones information * @bdev: Target block device * @sector: Sector from which to report zones * @nr_zones: Maximum number of zones to report * @cb: Callback function called for each reported zone * @data: Private data for the callback * * Description: * Get zone information starting from the zone containing @sector for at most * @nr_zones, and call @cb for each zone reported by the device. * To report all zones in a device starting from @sector, the BLK_ALL_ZONES * constant can be passed to @nr_zones. * Returns the number of zones reported by the device, or a negative errno * value in case of failure. * * Note: The caller must use memalloc_noXX_save/restore() calls to control * memory allocations done within this function. 
*/ int blkdev_report_zones(struct block_device *bdev, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data) { struct gendisk *disk = bdev->bd_disk; sector_t capacity = get_capacity(disk); struct disk_report_zones_cb_args args = { .disk = disk, .user_cb = cb, .user_data = data, }; if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones)) return -EOPNOTSUPP; if (!nr_zones || sector >= capacity) return 0; return disk->fops->report_zones(disk, sector, nr_zones, disk_report_zones_cb, &args); } EXPORT_SYMBOL_GPL(blkdev_report_zones); static int blkdev_zone_reset_all(struct block_device *bdev) { struct bio bio; bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC); trace_blkdev_zone_mgmt(&bio, 0); return submit_bio_wait(&bio); } /** * blkdev_zone_mgmt - Execute a zone management operation on a range of zones * @bdev: Target block device * @op: Operation to be performed on the zones * @sector: Start sector of the first zone to operate on * @nr_sectors: Number of sectors, should be at least the length of one zone and * must be zone size aligned. * * Description: * Perform the specified operation on the range of zones specified by * @sector..@sector+@nr_sectors. Specifying the entire disk sector range * is valid, but the specified range should not contain conventional zones. * The operation to execute on each zone can be a zone reset, open, close * or finish request. */ int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, sector_t sector, sector_t nr_sectors) { sector_t zone_sectors = bdev_zone_sectors(bdev); sector_t capacity = bdev_nr_sectors(bdev); sector_t end_sector = sector + nr_sectors; struct bio *bio = NULL; int ret = 0; if (!bdev_is_zoned(bdev)) return -EOPNOTSUPP; if (bdev_read_only(bdev)) return -EPERM; if (!op_is_zone_mgmt(op)) return -EOPNOTSUPP; if (end_sector <= sector || end_sector > capacity) /* Out of range */ return -EINVAL; /* Check alignment (handle eventual smaller last zone) */ if (!bdev_is_zone_start(bdev, sector)) return -EINVAL; if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity) return -EINVAL; /* * In the case of a zone reset operation over all zones, use * REQ_OP_ZONE_RESET_ALL. */ if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) return blkdev_zone_reset_all(bdev); while (sector < end_sector) { bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, GFP_KERNEL); bio->bi_iter.bi_sector = sector; sector += zone_sectors; /* This may take a while, so be nice to others */ cond_resched(); } trace_blkdev_zone_mgmt(bio, nr_sectors); ret = submit_bio_wait(bio); bio_put(bio); return ret; } EXPORT_SYMBOL_GPL(blkdev_zone_mgmt); struct zone_report_args { struct blk_zone __user *zones; }; static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx, void *data) { struct zone_report_args *args = data; if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone))) return -EFAULT; return 0; } /* * BLKREPORTZONE ioctl processing. * Called from blkdev_ioctl. 
*/ int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct zone_report_args args; struct blk_zone_report rep; int ret; if (!argp) return -EINVAL; if (!bdev_is_zoned(bdev)) return -ENOTTY; if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report))) return -EFAULT; if (!rep.nr_zones) return -EINVAL; args.zones = argp + sizeof(struct blk_zone_report); ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones, blkdev_copy_zone_to_user, &args); if (ret < 0) return ret; rep.nr_zones = ret; rep.flags = BLK_ZONE_REP_CAPACITY; if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) return -EFAULT; return 0; } static int blkdev_truncate_zone_range(struct block_device *bdev, blk_mode_t mode, const struct blk_zone_range *zrange) { loff_t start, end; if (zrange->sector + zrange->nr_sectors <= zrange->sector || zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk)) /* Out of range */ return -EINVAL; start = zrange->sector << SECTOR_SHIFT; end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1; return truncate_bdev_range(bdev, mode, start, end); } /* * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing. * Called from blkdev_ioctl. */ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct blk_zone_range zrange; enum req_op op; int ret; if (!argp) return -EINVAL; if (!bdev_is_zoned(bdev)) return -ENOTTY; if (!(mode & BLK_OPEN_WRITE)) return -EBADF; if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range))) return -EFAULT; switch (cmd) { case BLKRESETZONE: op = REQ_OP_ZONE_RESET; /* Invalidate the page cache, including dirty pages. */ inode_lock(bdev->bd_mapping->host); filemap_invalidate_lock(bdev->bd_mapping); ret = blkdev_truncate_zone_range(bdev, mode, &zrange); if (ret) goto fail; break; case BLKOPENZONE: op = REQ_OP_ZONE_OPEN; break; case BLKCLOSEZONE: op = REQ_OP_ZONE_CLOSE; break; case BLKFINISHZONE: op = REQ_OP_ZONE_FINISH; break; default: return -ENOTTY; } ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors); fail: if (cmd == BLKRESETZONE) { filemap_invalidate_unlock(bdev->bd_mapping); inode_unlock(bdev->bd_mapping->host); } return ret; } static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone) { return zone->start + zone->len >= get_capacity(disk); } static bool disk_zone_is_full(struct gendisk *disk, unsigned int zno, unsigned int offset_in_zone) { if (zno < disk->nr_zones - 1) return offset_in_zone >= disk->zone_capacity; return offset_in_zone >= disk->last_zone_capacity; } static bool disk_zone_wplug_is_full(struct gendisk *disk, struct blk_zone_wplug *zwplug) { return disk_zone_is_full(disk, zwplug->zone_no, zwplug->wp_offset); } static bool disk_insert_zone_wplug(struct gendisk *disk, struct blk_zone_wplug *zwplug) { struct blk_zone_wplug *zwplg; unsigned long flags; unsigned int idx = hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits); /* * Add the new zone write plug to the hash table, but carefully as we * are racing with other submission context, so we may already have a * zone write plug for the same zone. 
*/ spin_lock_irqsave(&disk->zone_wplugs_lock, flags); hlist_for_each_entry_rcu(zwplg, &disk->zone_wplugs_hash[idx], node) { if (zwplg->zone_no == zwplug->zone_no) { spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); return false; } } hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]); atomic_inc(&disk->nr_zone_wplugs); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); return true; } static struct blk_zone_wplug *disk_get_hashed_zone_wplug(struct gendisk *disk, sector_t sector) { unsigned int zno = disk_zone_no(disk, sector); unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits); struct blk_zone_wplug *zwplug; rcu_read_lock(); hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) { if (zwplug->zone_no == zno && refcount_inc_not_zero(&zwplug->ref)) { rcu_read_unlock(); return zwplug; } } rcu_read_unlock(); return NULL; } static inline struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk, sector_t sector) { if (!atomic_read(&disk->nr_zone_wplugs)) return NULL; return disk_get_hashed_zone_wplug(disk, sector); } static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head) { struct blk_zone_wplug *zwplug = container_of(rcu_head, struct blk_zone_wplug, rcu_head); mempool_free(zwplug, zwplug->disk->zone_wplugs_pool); } static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug) { if (refcount_dec_and_test(&zwplug->ref)) { WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list)); WARN_ON_ONCE(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED); WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_UNHASHED)); call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu); } } static inline bool disk_should_remove_zone_wplug(struct gendisk *disk, struct blk_zone_wplug *zwplug) { lockdep_assert_held(&zwplug->lock); /* If the zone write plug was already removed, we are done. */ if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) return false; /* If the zone write plug is still plugged, it cannot be removed. */ if (zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) return false; /* * Completions of BIOs with blk_zone_write_plug_bio_endio() may * happen after handling a request completion with * blk_zone_write_plug_finish_request() (e.g. with split BIOs * that are chained). In such case, disk_zone_wplug_unplug_bio() * should not attempt to remove the zone write plug until all BIO * completions are seen. Check by looking at the zone write plug * reference count, which is 2 when the plug is unused (one reference * taken when the plug was allocated and another reference taken by the * caller context). */ if (refcount_read(&zwplug->ref) > 2) return false; /* We can remove zone write plugs for zones that are empty or full. */ return !zwplug->wp_offset || disk_zone_wplug_is_full(disk, zwplug); } static void disk_remove_zone_wplug(struct gendisk *disk, struct blk_zone_wplug *zwplug) { unsigned long flags; /* If the zone write plug was already removed, we have nothing to do. */ if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) return; /* * Mark the zone write plug as unhashed and drop the extra reference we * took when the plug was inserted in the hash table. */ zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED; spin_lock_irqsave(&disk->zone_wplugs_lock, flags); hlist_del_init_rcu(&zwplug->node); atomic_dec(&disk->nr_zone_wplugs); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); disk_put_zone_wplug(zwplug); } static void blk_zone_wplug_bio_work(struct work_struct *work); /* * Get a reference on the write plug for the zone containing @sector. * If the plug does not exist, it is allocated and hashed. 
* Return a pointer to the zone write plug with the plug spinlock held. */ static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk, sector_t sector, gfp_t gfp_mask, unsigned long *flags) { unsigned int zno = disk_zone_no(disk, sector); struct blk_zone_wplug *zwplug; again: zwplug = disk_get_zone_wplug(disk, sector); if (zwplug) { /* * Check that a BIO completion or a zone reset or finish * operation has not already removed the zone write plug from * the hash table and dropped its reference count. In such case, * we need to get a new plug so start over from the beginning. */ spin_lock_irqsave(&zwplug->lock, *flags); if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) { spin_unlock_irqrestore(&zwplug->lock, *flags); disk_put_zone_wplug(zwplug); goto again; } return zwplug; } /* * Allocate and initialize a zone write plug with an extra reference * so that it is not freed when the zone write plug becomes idle without * the zone being full. */ zwplug = mempool_alloc(disk->zone_wplugs_pool, gfp_mask); if (!zwplug) return NULL; INIT_HLIST_NODE(&zwplug->node); refcount_set(&zwplug->ref, 2); spin_lock_init(&zwplug->lock); zwplug->flags = 0; zwplug->zone_no = zno; zwplug->wp_offset = bdev_offset_from_zone_start(disk->part0, sector); bio_list_init(&zwplug->bio_list); INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work); zwplug->disk = disk; spin_lock_irqsave(&zwplug->lock, *flags); /* * Insert the new zone write plug in the hash table. This can fail only * if another context already inserted a plug. Retry from the beginning * in such case. */ if (!disk_insert_zone_wplug(disk, zwplug)) { spin_unlock_irqrestore(&zwplug->lock, *flags); mempool_free(zwplug, disk->zone_wplugs_pool); goto again; } return zwplug; } static inline void blk_zone_wplug_bio_io_error(struct blk_zone_wplug *zwplug, struct bio *bio) { struct request_queue *q = zwplug->disk->queue; bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING); bio_io_error(bio); disk_put_zone_wplug(zwplug); /* Drop the reference taken by disk_zone_wplug_add_bio(() */ blk_queue_exit(q); } /* * Abort (fail) all plugged BIOs of a zone write plug. */ static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug) { struct bio *bio; if (bio_list_empty(&zwplug->bio_list)) return; pr_warn_ratelimited("%s: zone %u: Aborting plugged BIOs\n", zwplug->disk->disk_name, zwplug->zone_no); while ((bio = bio_list_pop(&zwplug->bio_list))) blk_zone_wplug_bio_io_error(zwplug, bio); } /* * Set a zone write plug write pointer offset to the specified value. * This aborts all plugged BIOs, which is fine as this function is called for * a zone reset operation, a zone finish operation or if the zone needs a wp * update from a report zone after a write error. */ static void disk_zone_wplug_set_wp_offset(struct gendisk *disk, struct blk_zone_wplug *zwplug, unsigned int wp_offset) { lockdep_assert_held(&zwplug->lock); /* Update the zone write pointer and abort all plugged BIOs. */ zwplug->flags &= ~BLK_ZONE_WPLUG_NEED_WP_UPDATE; zwplug->wp_offset = wp_offset; disk_zone_wplug_abort(zwplug); /* * The zone write plug now has no BIO plugged: remove it from the * hash table so that it cannot be seen. The plug will be freed * when the last reference is dropped. 
*/ if (disk_should_remove_zone_wplug(disk, zwplug)) disk_remove_zone_wplug(disk, zwplug); } static unsigned int blk_zone_wp_offset(struct blk_zone *zone) { switch (zone->cond) { case BLK_ZONE_COND_IMP_OPEN: case BLK_ZONE_COND_EXP_OPEN: case BLK_ZONE_COND_CLOSED: return zone->wp - zone->start; case BLK_ZONE_COND_FULL: return zone->len; case BLK_ZONE_COND_EMPTY: return 0; case BLK_ZONE_COND_NOT_WP: case BLK_ZONE_COND_OFFLINE: case BLK_ZONE_COND_READONLY: default: /* * Conventional, offline and read-only zones do not have a valid * write pointer. */ return UINT_MAX; } } static void disk_zone_wplug_sync_wp_offset(struct gendisk *disk, struct blk_zone *zone) { struct blk_zone_wplug *zwplug; unsigned long flags; zwplug = disk_get_zone_wplug(disk, zone->start); if (!zwplug) return; spin_lock_irqsave(&zwplug->lock, flags); if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE) disk_zone_wplug_set_wp_offset(disk, zwplug, blk_zone_wp_offset(zone)); spin_unlock_irqrestore(&zwplug->lock, flags); disk_put_zone_wplug(zwplug); } static int disk_zone_sync_wp_offset(struct gendisk *disk, sector_t sector) { struct disk_report_zones_cb_args args = { .disk = disk, }; return disk->fops->report_zones(disk, sector, 1, disk_report_zones_cb, &args); } static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio, unsigned int wp_offset) { struct gendisk *disk = bio->bi_bdev->bd_disk; sector_t sector = bio->bi_iter.bi_sector; struct blk_zone_wplug *zwplug; unsigned long flags; /* Conventional zones cannot be reset nor finished. */ if (!bdev_zone_is_seq(bio->bi_bdev, sector)) { bio_io_error(bio); return true; } /* * No-wait reset or finish BIOs do not make much sense as the callers * issue these as blocking operations in most cases. To avoid issues * the BIO execution potentially failing with BLK_STS_AGAIN, warn about * REQ_NOWAIT being set and ignore that flag. */ if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) bio->bi_opf &= ~REQ_NOWAIT; /* * If we have a zone write plug, set its write pointer offset to 0 * (reset case) or to the zone size (finish case). This will abort all * BIOs plugged for the target zone. It is fine as resetting or * finishing zones while writes are still in-flight will result in the * writes failing anyway. */ zwplug = disk_get_zone_wplug(disk, sector); if (zwplug) { spin_lock_irqsave(&zwplug->lock, flags); disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset); spin_unlock_irqrestore(&zwplug->lock, flags); disk_put_zone_wplug(zwplug); } return false; } static bool blk_zone_wplug_handle_reset_all(struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; struct blk_zone_wplug *zwplug; unsigned long flags; sector_t sector; /* * Set the write pointer offset of all zone write plugs to 0. This will * abort all plugged BIOs. It is fine as resetting zones while writes * are still in-flight will result in the writes failing anyway. */ for (sector = 0; sector < get_capacity(disk); sector += disk->queue->limits.chunk_sectors) { zwplug = disk_get_zone_wplug(disk, sector); if (zwplug) { spin_lock_irqsave(&zwplug->lock, flags); disk_zone_wplug_set_wp_offset(disk, zwplug, 0); spin_unlock_irqrestore(&zwplug->lock, flags); disk_put_zone_wplug(zwplug); } } return false; } static void disk_zone_wplug_schedule_bio_work(struct gendisk *disk, struct blk_zone_wplug *zwplug) { /* * Take a reference on the zone write plug and schedule the submission * of the next plugged BIO. blk_zone_wplug_bio_work() will release the * reference we take here. 
*/ WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)); refcount_inc(&zwplug->ref); queue_work(disk->zone_wplugs_wq, &zwplug->bio_work); } static inline void disk_zone_wplug_add_bio(struct gendisk *disk, struct blk_zone_wplug *zwplug, struct bio *bio, unsigned int nr_segs) { bool schedule_bio_work = false; /* * Grab an extra reference on the BIO request queue usage counter. * This reference will be reused to submit a request for the BIO for * blk-mq devices and dropped when the BIO is failed and after * it is issued in the case of BIO-based devices. */ percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter); /* * The BIO is being plugged and thus will have to wait for the on-going * write and for all other writes already plugged. So polling makes * no sense. */ bio_clear_polled(bio); /* * REQ_NOWAIT BIOs are always handled using the zone write plug BIO * work, which can block. So clear the REQ_NOWAIT flag and schedule the * work if this is the first BIO we are plugging. */ if (bio->bi_opf & REQ_NOWAIT) { schedule_bio_work = !(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED); bio->bi_opf &= ~REQ_NOWAIT; } /* * Reuse the poll cookie field to store the number of segments when * split to the hardware limits. */ bio->__bi_nr_segments = nr_segs; /* * We always receive BIOs after they are split and ready to be issued. * The block layer passes the parts of a split BIO in order, and the * user must also issue write sequentially. So simply add the new BIO * at the tail of the list to preserve the sequential write order. */ bio_list_add(&zwplug->bio_list, bio); trace_disk_zone_wplug_add_bio(zwplug->disk->queue, zwplug->zone_no, bio->bi_iter.bi_sector, bio_sectors(bio)); zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; if (schedule_bio_work) disk_zone_wplug_schedule_bio_work(disk, zwplug); } /* * Called from bio_attempt_back_merge() when a BIO was merged with a request. */ void blk_zone_write_plug_bio_merged(struct bio *bio) { struct blk_zone_wplug *zwplug; unsigned long flags; /* * If the BIO was already plugged, then we were called through * blk_zone_write_plug_init_request() -> blk_attempt_bio_merge(). * For this case, we already hold a reference on the zone write plug for * the BIO and blk_zone_write_plug_init_request() will handle the * zone write pointer offset update. */ if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING)) return; bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING); /* * Get a reference on the zone write plug of the target zone and advance * the zone write pointer offset. Given that this is a merge, we already * have at least one request and one BIO referencing the zone write * plug. So this should not fail. */ zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); if (WARN_ON_ONCE(!zwplug)) return; spin_lock_irqsave(&zwplug->lock, flags); zwplug->wp_offset += bio_sectors(bio); spin_unlock_irqrestore(&zwplug->lock, flags); } /* * Attempt to merge plugged BIOs with a newly prepared request for a BIO that * already went through zone write plugging (either a new BIO or one that was * unplugged). 
*/ void blk_zone_write_plug_init_request(struct request *req) { sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req); struct request_queue *q = req->q; struct gendisk *disk = q->disk; struct blk_zone_wplug *zwplug = disk_get_zone_wplug(disk, blk_rq_pos(req)); unsigned long flags; struct bio *bio; if (WARN_ON_ONCE(!zwplug)) return; /* * Indicate that completion of this request needs to be handled with * blk_zone_write_plug_finish_request(), which will drop the reference * on the zone write plug we took above on entry to this function. */ req->rq_flags |= RQF_ZONE_WRITE_PLUGGING; if (blk_queue_nomerges(q)) return; /* * Walk through the list of plugged BIOs to check if they can be merged * into the back of the request. */ spin_lock_irqsave(&zwplug->lock, flags); while (!disk_zone_wplug_is_full(disk, zwplug)) { bio = bio_list_peek(&zwplug->bio_list); if (!bio) break; if (bio->bi_iter.bi_sector != req_back_sector || !blk_rq_merge_ok(req, bio)) break; WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE_ZEROES && !bio->__bi_nr_segments); bio_list_pop(&zwplug->bio_list); if (bio_attempt_back_merge(req, bio, bio->__bi_nr_segments) != BIO_MERGE_OK) { bio_list_add_head(&zwplug->bio_list, bio); break; } /* Drop the reference taken by disk_zone_wplug_add_bio(). */ blk_queue_exit(q); zwplug->wp_offset += bio_sectors(bio); req_back_sector += bio_sectors(bio); } spin_unlock_irqrestore(&zwplug->lock, flags); } /* * Check and prepare a BIO for submission by incrementing the write pointer * offset of its zone write plug and changing zone append operations into * regular write when zone append emulation is needed. */ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug, struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; lockdep_assert_held(&zwplug->lock); /* * If we lost track of the zone write pointer due to a write error, * the user must either execute a report zones, reset the zone or finish * the to recover a reliable write pointer position. Fail BIOs if the * user did not do that as we cannot handle emulated zone append * otherwise. */ if (zwplug->flags & BLK_ZONE_WPLUG_NEED_WP_UPDATE) return false; /* * Check that the user is not attempting to write to a full zone. * We know such BIO will fail, and that would potentially overflow our * write pointer offset beyond the end of the zone. */ if (disk_zone_wplug_is_full(disk, zwplug)) return false; if (bio_op(bio) == REQ_OP_ZONE_APPEND) { /* * Use a regular write starting at the current write pointer. * Similarly to native zone append operations, do not allow * merging. */ bio->bi_opf &= ~REQ_OP_MASK; bio->bi_opf |= REQ_OP_WRITE | REQ_NOMERGE; bio->bi_iter.bi_sector += zwplug->wp_offset; /* * Remember that this BIO is in fact a zone append operation * so that we can restore its operation code on completion. */ bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND); } else { /* * Check for non-sequential writes early as we know that BIOs * with a start sector not unaligned to the zone write pointer * will fail. */ if (bio_offset_from_zone_start(bio) != zwplug->wp_offset) return false; } /* Advance the zone write pointer offset. */ zwplug->wp_offset += bio_sectors(bio); return true; } static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs) { struct gendisk *disk = bio->bi_bdev->bd_disk; sector_t sector = bio->bi_iter.bi_sector; struct blk_zone_wplug *zwplug; gfp_t gfp_mask = GFP_NOIO; unsigned long flags; /* * BIOs must be fully contained within a zone so that we use the correct * zone write plug for the entire BIO. 
For blk-mq devices, the block * layer should already have done any splitting required to ensure this * and this BIO should thus not be straddling zone boundaries. For * BIO-based devices, it is the responsibility of the driver to split * the bio before submitting it. */ if (WARN_ON_ONCE(bio_straddles_zones(bio))) { bio_io_error(bio); return true; } /* Conventional zones do not need write plugging. */ if (!bdev_zone_is_seq(bio->bi_bdev, sector)) { /* Zone append to conventional zones is not allowed. */ if (bio_op(bio) == REQ_OP_ZONE_APPEND) { bio_io_error(bio); return true; } return false; } if (bio->bi_opf & REQ_NOWAIT) gfp_mask = GFP_NOWAIT; zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags); if (!zwplug) { if (bio->bi_opf & REQ_NOWAIT) bio_wouldblock_error(bio); else bio_io_error(bio); return true; } /* Indicate that this BIO is being handled using zone write plugging. */ bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING); /* * If the zone is already plugged, add the BIO to the plug BIO list. * Do the same for REQ_NOWAIT BIOs to ensure that we will not see a * BLK_STS_AGAIN failure if we let the BIO execute. * Otherwise, plug and let the BIO execute. */ if ((zwplug->flags & BLK_ZONE_WPLUG_PLUGGED) || (bio->bi_opf & REQ_NOWAIT)) goto plug; if (!blk_zone_wplug_prepare_bio(zwplug, bio)) { spin_unlock_irqrestore(&zwplug->lock, flags); bio_io_error(bio); return true; } zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; spin_unlock_irqrestore(&zwplug->lock, flags); return false; plug: disk_zone_wplug_add_bio(disk, zwplug, bio, nr_segs); spin_unlock_irqrestore(&zwplug->lock, flags); return true; } static void blk_zone_wplug_handle_native_zone_append(struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; struct blk_zone_wplug *zwplug; unsigned long flags; /* * We have native support for zone append operations, so we are not * going to handle @bio through plugging. However, we may already have a * zone write plug for the target zone if that zone was previously * partially written using regular writes. In such case, we risk leaving * the plug in the disk hash table if the zone is fully written using * zone append operations. Avoid this by removing the zone write plug. */ zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector); if (likely(!zwplug)) return; spin_lock_irqsave(&zwplug->lock, flags); /* * We are about to remove the zone write plug. But if the user * (mistakenly) has issued regular writes together with native zone * append, we must aborts the writes as otherwise the plugged BIOs would * not be executed by the plug BIO work as disk_get_zone_wplug() will * return NULL after the plug is removed. Aborting the plugged write * BIOs is consistent with the fact that these writes will most likely * fail anyway as there is no ordering guarantees between zone append * operations and regular write operations. */ if (!bio_list_empty(&zwplug->bio_list)) { pr_warn_ratelimited("%s: zone %u: Invalid mix of zone append and regular writes\n", disk->disk_name, zwplug->zone_no); disk_zone_wplug_abort(zwplug); } disk_remove_zone_wplug(disk, zwplug); spin_unlock_irqrestore(&zwplug->lock, flags); disk_put_zone_wplug(zwplug); } /** * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging * @bio: The BIO being submitted * @nr_segs: The number of physical segments of @bio * * Handle write, write zeroes and zone append operations requiring emulation * using zone write plugging. * * Return true whenever @bio execution needs to be delayed through the zone * write plug. 
Otherwise, return false to let the submission path process * @bio normally. */ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) { struct block_device *bdev = bio->bi_bdev; if (WARN_ON_ONCE(!bdev->bd_disk->zone_wplugs_hash)) return false; /* * Regular writes and write zeroes need to be handled through the target * zone write plug. This includes writes with REQ_FUA | REQ_PREFLUSH * which may need to go through the flush machinery depending on the * target device capabilities. Plugging such writes is fine as the flush * machinery operates at the request level, below the plug, and * completion of the flush sequence will go through the regular BIO * completion, which will handle zone write plugging. * Zone append operations for devices that requested emulation must * also be plugged so that these BIOs can be changed into regular * write BIOs. * Zone reset, reset all and finish commands need special treatment * to correctly track the write pointer offset of zones. These commands * are not plugged as we do not need serialization with write * operations. It is the responsibility of the user to not issue reset * and finish commands when write operations are in flight. */ switch (bio_op(bio)) { case REQ_OP_ZONE_APPEND: if (!bdev_emulates_zone_append(bdev)) { blk_zone_wplug_handle_native_zone_append(bio); return false; } fallthrough; case REQ_OP_WRITE: case REQ_OP_WRITE_ZEROES: return blk_zone_wplug_handle_write(bio, nr_segs); case REQ_OP_ZONE_RESET: return blk_zone_wplug_handle_reset_or_finish(bio, 0); case REQ_OP_ZONE_FINISH: return blk_zone_wplug_handle_reset_or_finish(bio, bdev_zone_sectors(bdev)); case REQ_OP_ZONE_RESET_ALL: return blk_zone_wplug_handle_reset_all(bio); default: return false; } return false; } EXPORT_SYMBOL_GPL(blk_zone_plug_bio); static void disk_zone_wplug_unplug_bio(struct gendisk *disk, struct blk_zone_wplug *zwplug) { unsigned long flags; spin_lock_irqsave(&zwplug->lock, flags); /* Schedule submission of the next plugged BIO if we have one. */ if (!bio_list_empty(&zwplug->bio_list)) { disk_zone_wplug_schedule_bio_work(disk, zwplug); spin_unlock_irqrestore(&zwplug->lock, flags); return; } zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED; /* * If the zone is full (it was fully written or finished, or empty * (it was reset), remove its zone write plug from the hash table. */ if (disk_should_remove_zone_wplug(disk, zwplug)) disk_remove_zone_wplug(disk, zwplug); spin_unlock_irqrestore(&zwplug->lock, flags); } void blk_zone_append_update_request_bio(struct request *rq, struct bio *bio) { /* * For zone append requests, the request sector indicates the location * at which the BIO data was written. Return this value to the BIO * issuer through the BIO iter sector. * For plugged zone writes, which include emulated zone append, we need * the original BIO sector so that blk_zone_write_plug_bio_endio() can * lookup the zone write plug. */ bio->bi_iter.bi_sector = rq->__sector; trace_blk_zone_append_update_request_bio(rq); } void blk_zone_write_plug_bio_endio(struct bio *bio) { struct gendisk *disk = bio->bi_bdev->bd_disk; struct blk_zone_wplug *zwplug = disk_get_zone_wplug(disk, bio->bi_iter.bi_sector); unsigned long flags; if (WARN_ON_ONCE(!zwplug)) return; /* Make sure we do not see this BIO again by clearing the plug flag. */ bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING); /* * If this is a regular write emulating a zone append operation, * restore the original operation code. 
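This lets the issuer see the BIO complete as a zone append operation rather than as the regular write used internally for the emulation.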
*/ if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) { bio->bi_opf &= ~REQ_OP_MASK; bio->bi_opf |= REQ_OP_ZONE_APPEND; bio_clear_flag(bio, BIO_EMULATES_ZONE_APPEND); } /* * If the BIO failed, abort all plugged BIOs and mark the plug as * needing a write pointer update. */ if (bio->bi_status != BLK_STS_OK) { spin_lock_irqsave(&zwplug->lock, flags); disk_zone_wplug_abort(zwplug); zwplug->flags |= BLK_ZONE_WPLUG_NEED_WP_UPDATE; spin_unlock_irqrestore(&zwplug->lock, flags); } /* Drop the reference we took when the BIO was issued. */ disk_put_zone_wplug(zwplug); /* * For BIO-based devices, blk_zone_write_plug_finish_request() * is not called. So we need to schedule execution of the next * plugged BIO here. */ if (bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) disk_zone_wplug_unplug_bio(disk, zwplug); /* Drop the reference we took when entering this function. */ disk_put_zone_wplug(zwplug); } void blk_zone_write_plug_finish_request(struct request *req) { struct gendisk *disk = req->q->disk; struct blk_zone_wplug *zwplug; zwplug = disk_get_zone_wplug(disk, req->__sector); if (WARN_ON_ONCE(!zwplug)) return; req->rq_flags &= ~RQF_ZONE_WRITE_PLUGGING; /* * Drop the reference we took when the request was initialized in * blk_zone_write_plug_init_request(). */ disk_put_zone_wplug(zwplug); disk_zone_wplug_unplug_bio(disk, zwplug); /* Drop the reference we took when entering this function. */ disk_put_zone_wplug(zwplug); } static void blk_zone_wplug_bio_work(struct work_struct *work) { struct blk_zone_wplug *zwplug = container_of(work, struct blk_zone_wplug, bio_work); struct block_device *bdev; unsigned long flags; struct bio *bio; /* * Submit the next plugged BIO. If we do not have any, clear * the plugged flag. */ spin_lock_irqsave(&zwplug->lock, flags); again: bio = bio_list_pop(&zwplug->bio_list); if (!bio) { zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED; spin_unlock_irqrestore(&zwplug->lock, flags); goto put_zwplug; } trace_blk_zone_wplug_bio(zwplug->disk->queue, zwplug->zone_no, bio->bi_iter.bi_sector, bio_sectors(bio)); if (!blk_zone_wplug_prepare_bio(zwplug, bio)) { blk_zone_wplug_bio_io_error(zwplug, bio); goto again; } spin_unlock_irqrestore(&zwplug->lock, flags); bdev = bio->bi_bdev; /* * blk-mq devices will reuse the extra reference on the request queue * usage counter we took when the BIO was plugged, but the submission * path for BIO-based devices will not do that. So drop this extra * reference here. */ if (bdev_test_flag(bdev, BD_HAS_SUBMIT_BIO)) { bdev->bd_disk->fops->submit_bio(bio); blk_queue_exit(bdev->bd_disk->queue); } else { blk_mq_submit_bio(bio); } put_zwplug: /* Drop the reference we took in disk_zone_wplug_schedule_bio_work(). */ disk_put_zone_wplug(zwplug); } static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk) { return 1U << disk->zone_wplugs_hash_bits; } void disk_init_zone_resources(struct gendisk *disk) { spin_lock_init(&disk->zone_wplugs_lock); } /* * For the size of a disk zone write plug hash table, use the size of the * zone write plug mempool, which is the maximum of the disk open zones and * active zones limits. But do not exceed 4KB (512 hlist head entries), that is, * 9 bits. For a disk that has no limits, mempool size defaults to 128. 
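For example, a mempool size of 128 gives min(ilog2(128) + 1, 9) = 8 hash bits, that is, 256 hlist heads (2KB with 8-byte list heads).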
*/ #define BLK_ZONE_WPLUG_MAX_HASH_BITS 9 #define BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE 128 static int disk_alloc_zone_resources(struct gendisk *disk, unsigned int pool_size) { unsigned int i; atomic_set(&disk->nr_zone_wplugs, 0); disk->zone_wplugs_hash_bits = min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS); disk->zone_wplugs_hash = kcalloc(disk_zone_wplugs_hash_size(disk), sizeof(struct hlist_head), GFP_KERNEL); if (!disk->zone_wplugs_hash) return -ENOMEM; for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) INIT_HLIST_HEAD(&disk->zone_wplugs_hash[i]); disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size, sizeof(struct blk_zone_wplug)); if (!disk->zone_wplugs_pool) goto free_hash; disk->zone_wplugs_wq = alloc_workqueue("%s_zwplugs", WQ_MEM_RECLAIM | WQ_HIGHPRI, pool_size, disk->disk_name); if (!disk->zone_wplugs_wq) goto destroy_pool; return 0; destroy_pool: mempool_destroy(disk->zone_wplugs_pool); disk->zone_wplugs_pool = NULL; free_hash: kfree(disk->zone_wplugs_hash); disk->zone_wplugs_hash = NULL; disk->zone_wplugs_hash_bits = 0; return -ENOMEM; } static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk) { struct blk_zone_wplug *zwplug; unsigned int i; if (!disk->zone_wplugs_hash) return; /* Free all the zone write plugs we have. */ for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) { while (!hlist_empty(&disk->zone_wplugs_hash[i])) { zwplug = hlist_entry(disk->zone_wplugs_hash[i].first, struct blk_zone_wplug, node); refcount_inc(&zwplug->ref); disk_remove_zone_wplug(disk, zwplug); disk_put_zone_wplug(zwplug); } } WARN_ON_ONCE(atomic_read(&disk->nr_zone_wplugs)); kfree(disk->zone_wplugs_hash); disk->zone_wplugs_hash = NULL; disk->zone_wplugs_hash_bits = 0; } static unsigned int disk_set_conv_zones_bitmap(struct gendisk *disk, unsigned long *bitmap) { unsigned int nr_conv_zones = 0; unsigned long flags; spin_lock_irqsave(&disk->zone_wplugs_lock, flags); if (bitmap) nr_conv_zones = bitmap_weight(bitmap, disk->nr_zones); bitmap = rcu_replace_pointer(disk->conv_zones_bitmap, bitmap, lockdep_is_held(&disk->zone_wplugs_lock)); spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); kfree_rcu_mightsleep(bitmap); return nr_conv_zones; } void disk_free_zone_resources(struct gendisk *disk) { if (!disk->zone_wplugs_pool) return; if (disk->zone_wplugs_wq) { destroy_workqueue(disk->zone_wplugs_wq); disk->zone_wplugs_wq = NULL; } disk_destroy_zone_wplugs_hash_table(disk); /* * Wait for the zone write plugs to be RCU-freed before * destorying the mempool. */ rcu_barrier(); mempool_destroy(disk->zone_wplugs_pool); disk->zone_wplugs_pool = NULL; disk_set_conv_zones_bitmap(disk, NULL); disk->zone_capacity = 0; disk->last_zone_capacity = 0; disk->nr_zones = 0; } static inline bool disk_need_zone_resources(struct gendisk *disk) { /* * All mq zoned devices need zone resources so that the block layer * can automatically handle write BIO plugging. BIO-based device drivers * (e.g. DM devices) are normally responsible for handling zone write * ordering and do not need zone resources, unless the driver requires * zone append emulation. */ return queue_is_mq(disk->queue) || queue_emulates_zone_append(disk->queue); } static int disk_revalidate_zone_resources(struct gendisk *disk, unsigned int nr_zones) { struct queue_limits *lim = &disk->queue->limits; unsigned int pool_size; if (!disk_need_zone_resources(disk)) return 0; /* * If the device has no limit on the maximum number of open and active * zones, use BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE. 
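The default is further capped to the number of zones so that a small device does not reserve more plugs than it has zones.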
*/ pool_size = max(lim->max_open_zones, lim->max_active_zones); if (!pool_size) pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones); if (!disk->zone_wplugs_hash) return disk_alloc_zone_resources(disk, pool_size); return 0; } struct blk_revalidate_zone_args { struct gendisk *disk; unsigned long *conv_zones_bitmap; unsigned int nr_zones; unsigned int zone_capacity; unsigned int last_zone_capacity; sector_t sector; }; /* * Update the disk zone resources information and device queue limits. * The disk queue is frozen when this is executed. */ static int disk_update_zone_resources(struct gendisk *disk, struct blk_revalidate_zone_args *args) { struct request_queue *q = disk->queue; unsigned int nr_seq_zones, nr_conv_zones; unsigned int pool_size; struct queue_limits lim; disk->nr_zones = args->nr_zones; disk->zone_capacity = args->zone_capacity; disk->last_zone_capacity = args->last_zone_capacity; nr_conv_zones = disk_set_conv_zones_bitmap(disk, args->conv_zones_bitmap); if (nr_conv_zones >= disk->nr_zones) { pr_warn("%s: Invalid number of conventional zones %u / %u\n", disk->disk_name, nr_conv_zones, disk->nr_zones); return -ENODEV; } lim = queue_limits_start_update(q); /* * Some devices can advertize zone resource limits that are larger than * the number of sequential zones of the zoned block device, e.g. a * small ZNS namespace. For such case, assume that the zoned device has * no zone resource limits. */ nr_seq_zones = disk->nr_zones - nr_conv_zones; if (lim.max_open_zones >= nr_seq_zones) lim.max_open_zones = 0; if (lim.max_active_zones >= nr_seq_zones) lim.max_active_zones = 0; if (!disk->zone_wplugs_pool) goto commit; /* * If the device has no limit on the maximum number of open and active * zones, set its max open zone limit to the mempool size to indicate * to the user that there is a potential performance impact due to * dynamic zone write plug allocation when simultaneously writing to * more zones than the size of the mempool. */ pool_size = max(lim.max_open_zones, lim.max_active_zones); if (!pool_size) pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_seq_zones); mempool_resize(disk->zone_wplugs_pool, pool_size); if (!lim.max_open_zones && !lim.max_active_zones) { if (pool_size < nr_seq_zones) lim.max_open_zones = pool_size; else lim.max_open_zones = 0; } commit: return queue_limits_commit_update_frozen(q, &lim); } static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx, struct blk_revalidate_zone_args *args) { struct gendisk *disk = args->disk; if (zone->capacity != zone->len) { pr_warn("%s: Invalid conventional zone capacity\n", disk->disk_name); return -ENODEV; } if (disk_zone_is_last(disk, zone)) args->last_zone_capacity = zone->capacity; if (!disk_need_zone_resources(disk)) return 0; if (!args->conv_zones_bitmap) { args->conv_zones_bitmap = bitmap_zalloc(args->nr_zones, GFP_NOIO); if (!args->conv_zones_bitmap) return -ENOMEM; } set_bit(idx, args->conv_zones_bitmap); return 0; } static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx, struct blk_revalidate_zone_args *args) { struct gendisk *disk = args->disk; struct blk_zone_wplug *zwplug; unsigned int wp_offset; unsigned long flags; /* * Remember the capacity of the first sequential zone and check * if it is constant for all zones, ignoring the last zone as it can be * smaller. 
*/ if (!args->zone_capacity) args->zone_capacity = zone->capacity; if (disk_zone_is_last(disk, zone)) { args->last_zone_capacity = zone->capacity; } else if (zone->capacity != args->zone_capacity) { pr_warn("%s: Invalid variable zone capacity\n", disk->disk_name); return -ENODEV; } /* * If the device needs zone append emulation, we need to track the * write pointer of all zones that are not empty nor full. So make sure * we have a zone write plug for such zone if the device has a zone * write plug hash table. */ if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash) return 0; disk_zone_wplug_sync_wp_offset(disk, zone); wp_offset = blk_zone_wp_offset(zone); if (!wp_offset || wp_offset >= zone->capacity) return 0; zwplug = disk_get_and_lock_zone_wplug(disk, zone->wp, GFP_NOIO, &flags); if (!zwplug) return -ENOMEM; spin_unlock_irqrestore(&zwplug->lock, flags); disk_put_zone_wplug(zwplug); return 0; } /* * Helper function to check the validity of zones of a zoned block device. */ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, void *data) { struct blk_revalidate_zone_args *args = data; struct gendisk *disk = args->disk; sector_t zone_sectors = disk->queue->limits.chunk_sectors; int ret; /* Check for bad zones and holes in the zone report */ if (zone->start != args->sector) { pr_warn("%s: Zone gap at sectors %llu..%llu\n", disk->disk_name, args->sector, zone->start); return -ENODEV; } if (zone->start >= get_capacity(disk) || !zone->len) { pr_warn("%s: Invalid zone start %llu, length %llu\n", disk->disk_name, zone->start, zone->len); return -ENODEV; } /* * All zones must have the same size, with the exception on an eventual * smaller last zone. */ if (!disk_zone_is_last(disk, zone)) { if (zone->len != zone_sectors) { pr_warn("%s: Invalid zoned device with non constant zone size\n", disk->disk_name); return -ENODEV; } } else if (zone->len > zone_sectors) { pr_warn("%s: Invalid zoned device with larger last zone size\n", disk->disk_name); return -ENODEV; } if (!zone->capacity || zone->capacity > zone->len) { pr_warn("%s: Invalid zone capacity\n", disk->disk_name); return -ENODEV; } /* Check zone type */ switch (zone->type) { case BLK_ZONE_TYPE_CONVENTIONAL: ret = blk_revalidate_conv_zone(zone, idx, args); break; case BLK_ZONE_TYPE_SEQWRITE_REQ: ret = blk_revalidate_seq_zone(zone, idx, args); break; case BLK_ZONE_TYPE_SEQWRITE_PREF: default: pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n", disk->disk_name, (int)zone->type, zone->start); ret = -ENODEV; } if (!ret) args->sector += zone->len; return ret; } /** * blk_revalidate_disk_zones - (re)allocate and initialize zone write plugs * @disk: Target disk * * Helper function for low-level device drivers to check, (re) allocate and * initialize resources used for managing zoned disks. This function should * normally be called by blk-mq based drivers when a zoned gendisk is probed * and when the zone configuration of the gendisk changes (e.g. after a format). * Before calling this function, the device driver must already have set the * device zone size (chunk_sector limit) and the max zone append limit. * BIO based drivers can also use this function as long as the device queue * can be safely frozen. 
*/ int blk_revalidate_disk_zones(struct gendisk *disk) { struct request_queue *q = disk->queue; sector_t zone_sectors = q->limits.chunk_sectors; sector_t capacity = get_capacity(disk); struct blk_revalidate_zone_args args = { }; unsigned int noio_flag; int ret = -ENOMEM; if (WARN_ON_ONCE(!blk_queue_is_zoned(q))) return -EIO; if (!capacity) return -ENODEV; /* * Checks that the device driver indicated a valid zone size and that * the max zone append limit is set. */ if (!zone_sectors || !is_power_of_2(zone_sectors)) { pr_warn("%s: Invalid non power of two zone size (%llu)\n", disk->disk_name, zone_sectors); return -ENODEV; } /* * Ensure that all memory allocations in this context are done as if * GFP_NOIO was specified. */ args.disk = disk; args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors); noio_flag = memalloc_noio_save(); ret = disk_revalidate_zone_resources(disk, args.nr_zones); if (ret) { memalloc_noio_restore(noio_flag); return ret; } ret = disk->fops->report_zones(disk, 0, UINT_MAX, blk_revalidate_zone_cb, &args); if (!ret) { pr_warn("%s: No zones reported\n", disk->disk_name); ret = -ENODEV; } memalloc_noio_restore(noio_flag); /* * If zones where reported, make sure that the entire disk capacity * has been checked. */ if (ret > 0 && args.sector != capacity) { pr_warn("%s: Missing zones from sector %llu\n", disk->disk_name, args.sector); ret = -ENODEV; } /* * Set the new disk zone parameters only once the queue is frozen and * all I/Os are completed. */ if (ret > 0) ret = disk_update_zone_resources(disk, &args); else pr_warn("%s: failed to revalidate zones\n", disk->disk_name); if (ret) { unsigned int memflags = blk_mq_freeze_queue(q); disk_free_zone_resources(disk); blk_mq_unfreeze_queue(q, memflags); } return ret; } EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones); /** * blk_zone_issue_zeroout - zero-fill a block range in a zone * @bdev: blockdev to write * @sector: start sector * @nr_sects: number of sectors to write * @gfp_mask: memory allocation flags (for bio_alloc) * * Description: * Zero-fill a block range in a zone (@sector must be equal to the zone write * pointer), handling potential errors due to the (initially unknown) lack of * hardware offload (See blkdev_issue_zeroout()). */ int blk_zone_issue_zeroout(struct block_device *bdev, sector_t sector, sector_t nr_sects, gfp_t gfp_mask) { int ret; if (WARN_ON_ONCE(!bdev_is_zoned(bdev))) return -EIO; ret = blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, BLKDEV_ZERO_NOFALLBACK); if (ret != -EOPNOTSUPP) return ret; /* * The failed call to blkdev_issue_zeroout() advanced the zone write * pointer. Undo this using a report zone to update the zone write * pointer to the correct current value. */ ret = disk_zone_sync_wp_offset(bdev->bd_disk, sector); if (ret != 1) return ret < 0 ? ret : -EIO; /* * Retry without BLKDEV_ZERO_NOFALLBACK to force the fallback to a * regular write with zero-pages. 
*/ return blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, 0); } EXPORT_SYMBOL_GPL(blk_zone_issue_zeroout); #ifdef CONFIG_BLK_DEBUG_FS static void queue_zone_wplug_show(struct blk_zone_wplug *zwplug, struct seq_file *m) { unsigned int zwp_wp_offset, zwp_flags; unsigned int zwp_zone_no, zwp_ref; unsigned int zwp_bio_list_size; unsigned long flags; spin_lock_irqsave(&zwplug->lock, flags); zwp_zone_no = zwplug->zone_no; zwp_flags = zwplug->flags; zwp_ref = refcount_read(&zwplug->ref); zwp_wp_offset = zwplug->wp_offset; zwp_bio_list_size = bio_list_size(&zwplug->bio_list); spin_unlock_irqrestore(&zwplug->lock, flags); seq_printf(m, "%u 0x%x %u %u %u\n", zwp_zone_no, zwp_flags, zwp_ref, zwp_wp_offset, zwp_bio_list_size); } int queue_zone_wplugs_show(void *data, struct seq_file *m) { struct request_queue *q = data; struct gendisk *disk = q->disk; struct blk_zone_wplug *zwplug; unsigned int i; if (!disk->zone_wplugs_hash) return 0; rcu_read_lock(); for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[i], node) queue_zone_wplug_show(zwplug, m); rcu_read_unlock(); return 0; } #endif
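/*
 * Illustrative sketch only (not part of the code above): a minimal example of
 * how a BIO-based zoned driver's ->submit_bio handler could defer to zone
 * write plugging before doing its own processing. my_zoned_submit_bio() and
 * the driver-specific step are hypothetical names for this sketch; only
 * blk_zone_plug_bio() is the API defined above, and passing 0 for nr_segs is
 * an assumption for a driver that does not pre-compute segment counts.
 */
static void __maybe_unused my_zoned_submit_bio(struct bio *bio)
{
	/*
	 * Let zone write plugging either queue the BIO for later execution
	 * (returns true) or prepare it for immediate execution (returns
	 * false).
	 */
	if (blk_zone_plug_bio(bio, 0))
		return;

	/* Driver-specific processing of the (possibly modified) BIO here. */
}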
// SPDX-License-Identifier: GPL-2.0-or-later /* cx231xx_avcore.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> This program contains the specific code to control the avdecoder chip and other related usb control functions for cx231xx based chipset. */ #include "cx231xx.h" #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitmap.h> #include <linux/i2c.h> #include <linux/mm.h> #include <linux/mutex.h> #include <media/tuner.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include "cx231xx-dif.h" #define TUNER_MODE_FM_RADIO 0 /****************************************************************************** -: BLOCK ARRANGEMENT :- I2S block ----------------------| [I2S audio] | | Analog Front End --> Direct IF -|-> Cx25840 --> Audio [video & audio] | [Audio] | |-> Cx25840 --> Video [Video] *******************************************************************************/ /****************************************************************************** * VERVE REGISTER * * * ******************************************************************************/ static int verve_write_byte(struct cx231xx *dev, u8 saddr, u8 data) { return cx231xx_write_i2c_data(dev, VERVE_I2C_ADDRESS, saddr, 1, data, 1); } static int verve_read_byte(struct cx231xx *dev, u8 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, VERVE_I2C_ADDRESS, saddr, 1, &temp, 1); *data = (u8) temp; return status; } void initGPIO(struct cx231xx *dev) { u32 _gpio_direction = 0; u32 value = 0; u8 val = 0; _gpio_direction = _gpio_direction & 0xFC0003FF; _gpio_direction = _gpio_direction | 0x03FDFC00; cx231xx_send_gpio_cmd(dev, _gpio_direction, (u8 *)&value, 4, 0, 0); verve_read_byte(dev, 0x07, &val); dev_dbg(dev->dev, "verve_read_byte address0x07=0x%x\n", val); verve_write_byte(dev, 0x07, 0xF4); verve_read_byte(dev, 0x07, &val); dev_dbg(dev->dev, "verve_read_byte address0x07=0x%x\n", val); cx231xx_capture_start(dev, 1, Vbi); cx231xx_mode_register(dev, EP_MODE_SET, 0x0500FE00); cx231xx_mode_register(dev, GBULK_BIT_EN, 0xFFFDFFFF); } void uninitGPIO(struct cx231xx *dev) { u8 value[4] = { 0, 0, 0, 0 }; cx231xx_capture_start(dev, 0, Vbi); verve_write_byte(dev, 0x07, 0x14); cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, 0x68, value, 4); } /****************************************************************************** * A F E - B L O C K C O N T R O L functions * * [ANALOG FRONT END] * ******************************************************************************/ static int afe_write_byte(struct cx231xx *dev, u16 saddr, u8 data) { return cx231xx_write_i2c_data(dev, AFE_DEVICE_ADDRESS, saddr, 2, data, 1); } static int afe_read_byte(struct cx231xx *dev, u16 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, AFE_DEVICE_ADDRESS, saddr, 2, &temp, 1); *data = (u8) temp; return status; } int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count) { int status = 0; u8 temp = 0; u8 afe_power_status = 0; int i = 0; /* super block initialize */ temp = (u8) (ref_count & 0xff); status = afe_write_byte(dev, SUP_BLK_TUNE2, temp); if (status < 0) return status; status = afe_read_byte(dev, SUP_BLK_TUNE2, &afe_power_status); if (status < 0) return status; temp = (u8) ((ref_count & 0x300) >> 8); temp |= 0x40; status = afe_write_byte(dev, SUP_BLK_TUNE1, temp); if (status < 0) return status; status =
afe_write_byte(dev, SUP_BLK_PLL2, 0x0f); if (status < 0) return status; /* enable pll */ while (afe_power_status != 0x18) { status = afe_write_byte(dev, SUP_BLK_PWRDN, 0x18); if (status < 0) { dev_dbg(dev->dev, "%s: Init Super Block failed in send cmd\n", __func__); break; } status = afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status &= 0xff; if (status < 0) { dev_dbg(dev->dev, "%s: Init Super Block failed in receive cmd\n", __func__); break; } i++; if (i == 10) { dev_dbg(dev->dev, "%s: Init Super Block force break in loop !!!!\n", __func__); status = -1; break; } } if (status < 0) return status; /* start tuning filter */ status = afe_write_byte(dev, SUP_BLK_TUNE3, 0x40); if (status < 0) return status; msleep(5); /* exit tuning */ status = afe_write_byte(dev, SUP_BLK_TUNE3, 0x00); return status; } int cx231xx_afe_init_channels(struct cx231xx *dev) { int status = 0; /* power up all 3 channels, clear pd_buffer */ status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); /* Enable quantizer calibration */ status = afe_write_byte(dev, ADC_COM_QUANT, 0x02); /* channel initialize, force modulator (fb) reset */ status = afe_write_byte(dev, ADC_FB_FRCRST_CH1, 0x17); status = afe_write_byte(dev, ADC_FB_FRCRST_CH2, 0x17); status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, 0x17); /* start quantilizer calibration */ status = afe_write_byte(dev, ADC_CAL_ATEST_CH1, 0x10); status = afe_write_byte(dev, ADC_CAL_ATEST_CH2, 0x10); status = afe_write_byte(dev, ADC_CAL_ATEST_CH3, 0x10); msleep(5); /* exit modulator (fb) reset */ status = afe_write_byte(dev, ADC_FB_FRCRST_CH1, 0x07); status = afe_write_byte(dev, ADC_FB_FRCRST_CH2, 0x07); status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, 0x07); /* enable the pre_clamp in each channel for single-ended input */ status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH1, 0xf0); status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH2, 0xf0); status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, 0xf0); /* use diode instead of resistor, so set term_en to 0, res_en to 0 */ status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH1, 3, 7, 0x00); status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH2, 3, 7, 0x00); status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH3, 3, 7, 0x00); /* dynamic element matching off */ status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH1, 0x03); status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH2, 0x03); status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, 0x03); return status; } int cx231xx_afe_setup_AFE_for_baseband(struct cx231xx *dev) { u8 c_value = 0; int status = 0; status = afe_read_byte(dev, ADC_PWRDN_CLAMP_CH2, &c_value); c_value &= (~(0x50)); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, c_value); return status; } /* The Analog Front End in Cx231xx has 3 channels. These channels are used to share between different inputs like tuner, s-video and composite inputs. 
channel 1 ----- pin 1 to pin4(in reg is 1-4) channel 2 ----- pin 5 to pin8(in reg is 5-8) channel 3 ----- pin 9 to pin 12(in reg is 9-11) */ int cx231xx_afe_set_input_mux(struct cx231xx *dev, u32 input_mux) { u8 ch1_setting = (u8) input_mux; u8 ch2_setting = (u8) (input_mux >> 8); u8 ch3_setting = (u8) (input_mux >> 16); int status = 0; u8 value = 0; if (ch1_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH1, &value); value &= ~INPUT_SEL_MASK; value |= (ch1_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH1, value); } if (ch2_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH2, &value); value &= ~INPUT_SEL_MASK; value |= (ch2_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH2, value); } /* For ch3_setting, the value to put in the register is 7 less than the input number */ if (ch3_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH3, &value); value &= ~INPUT_SEL_MASK; value |= (ch3_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH3, value); } return status; } int cx231xx_afe_set_mode(struct cx231xx *dev, enum AFE_MODE mode) { int status = 0; /* * FIXME: We need to implement the AFE code for LOW IF and for HI IF. * Currently, only baseband works. */ switch (mode) { case AFE_MODE_LOW_IF: cx231xx_Setup_AFE_for_LowIF(dev); break; case AFE_MODE_BASEBAND: status = cx231xx_afe_setup_AFE_for_baseband(dev); break; case AFE_MODE_EU_HI_IF: /* SetupAFEforEuHiIF(); */ break; case AFE_MODE_US_HI_IF: /* SetupAFEforUsHiIF(); */ break; case AFE_MODE_JAPAN_HI_IF: /* SetupAFEforJapanHiIF(); */ break; } if ((mode != dev->afe_mode) && (dev->video_input == CX231XX_VMUX_TELEVISION)) status = cx231xx_afe_adjust_ref_count(dev, CX231XX_VMUX_TELEVISION); dev->afe_mode = mode; return status; } int cx231xx_afe_update_power_control(struct cx231xx *dev, enum AV_MODE avmode) { u8 afe_power_status = 0; int status = 0; switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_CNXT_VIDEO_GRABBER: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_930C_HD_1113xx: case CX231XX_BOARD_HAUPPAUGE_USBLIVE2: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: case CX231XX_BOARD_OTG102: if (avmode == POLARIS_AVMODE_ANALOGT_TV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else if (avmode == POLARIS_AVMODE_DIGITAL) { status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x70); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status |= FLD_PWRDN_PD_BANDGAP | FLD_PWRDN_PD_BIAS | FLD_PWRDN_PD_TUNECK; status |= afe_write_byte(dev, SUP_BLK_PWRDN, afe_power_status); } else if (avmode == POLARIS_AVMODE_ENXTERNAL_AV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | 
FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else { dev_dbg(dev->dev, "Invalid AV mode input\n"); status = -1; } break; default: if (avmode == POLARIS_AVMODE_ANALOGT_TV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x40); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x40); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else if (avmode == POLARIS_AVMODE_DIGITAL) { status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x70); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status |= FLD_PWRDN_PD_BANDGAP | FLD_PWRDN_PD_BIAS | FLD_PWRDN_PD_TUNECK; status |= afe_write_byte(dev, SUP_BLK_PWRDN, afe_power_status); } else if (avmode == POLARIS_AVMODE_ENXTERNAL_AV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x40); } else { dev_dbg(dev->dev, "Invalid AV mode input\n"); status = -1; } } /* switch */ return status; } int cx231xx_afe_adjust_ref_count(struct cx231xx *dev, u32 video_input) { u8 input_mode = 0; u8 ntf_mode = 0; int status = 0; dev->video_input = video_input; if (video_input == CX231XX_VMUX_TELEVISION) { status = afe_read_byte(dev, ADC_INPUT_CH3, &input_mode); status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &ntf_mode); } else { status = afe_read_byte(dev, ADC_INPUT_CH1, &input_mode); status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH1, &ntf_mode); } input_mode = (ntf_mode & 0x3) | ((input_mode & 0x6) << 1); switch (input_mode) { case SINGLE_ENDED: dev->afe_ref_count = 0x23C; break; case LOW_IF: dev->afe_ref_count = 0x24C; break; case EU_IF: dev->afe_ref_count = 0x258; break; case US_IF: dev->afe_ref_count = 0x260; break; default: break; } status = cx231xx_afe_init_super_block(dev, dev->afe_ref_count); return status; } /****************************************************************************** * V I D E O / A U D I O D E C O D E R C O N T R O L functions * ******************************************************************************/ static int vid_blk_write_byte(struct cx231xx *dev, u16 saddr, u8 data) { return cx231xx_write_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, data, 1); } static int vid_blk_read_byte(struct cx231xx *dev, u16 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, &temp, 1); *data = (u8) temp; return status; } static int vid_blk_write_word(struct cx231xx *dev, u16 saddr, u32 data) { return cx231xx_write_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, data, 4); } static int vid_blk_read_word(struct cx231xx *dev, u16 saddr, u32 *data) { return cx231xx_read_i2c_data(dev, 
VID_BLK_I2C_ADDRESS, saddr, 2, data, 4); } int cx231xx_check_fw(struct cx231xx *dev) { u8 temp = 0; int status = 0; status = vid_blk_read_byte(dev, DL_CTL_ADDRESS_LOW, &temp); if (status < 0) return status; else return temp; } int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input) { int status = 0; switch (INPUT(input)->type) { case CX231XX_VMUX_COMPOSITE1: case CX231XX_VMUX_SVIDEO: if ((dev->current_pcb_config.type == USB_BUS_POWER) && (dev->power_mode != POLARIS_AVMODE_ENXTERNAL_AV)) { /* External AV */ status = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ENXTERNAL_AV); if (status < 0) { dev_err(dev->dev, "%s: Failed to set Power - errCode [%d]!\n", __func__, status); return status; } } status = cx231xx_set_decoder_video_input(dev, INPUT(input)->type, INPUT(input)->vmux); break; case CX231XX_VMUX_TELEVISION: case CX231XX_VMUX_CABLE: if ((dev->current_pcb_config.type == USB_BUS_POWER) && (dev->power_mode != POLARIS_AVMODE_ANALOGT_TV)) { /* Tuner */ status = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV); if (status < 0) { dev_err(dev->dev, "%s: Failed to set Power - errCode [%d]!\n", __func__, status); return status; } } switch (dev->model) { /* i2c device tuners */ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx: case CX231XX_BOARD_HAUPPAUGE_935C: case CX231XX_BOARD_HAUPPAUGE_955Q: case CX231XX_BOARD_HAUPPAUGE_975: case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD: status = cx231xx_set_decoder_video_input(dev, CX231XX_VMUX_TELEVISION, INPUT(input)->vmux); break; default: if (dev->tuner_type == TUNER_NXP_TDA18271) status = cx231xx_set_decoder_video_input(dev, CX231XX_VMUX_TELEVISION, INPUT(input)->vmux); else status = cx231xx_set_decoder_video_input(dev, CX231XX_VMUX_COMPOSITE1, INPUT(input)->vmux); break; } break; default: dev_err(dev->dev, "%s: Unknown Input %d !\n", __func__, INPUT(input)->type); break; } /* save the selection */ dev->video_input = input; return status; } int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u32 input) { int status = 0; u32 value = 0; if (pin_type != dev->video_input) { status = cx231xx_afe_adjust_ref_count(dev, pin_type); if (status < 0) { dev_err(dev->dev, "%s: adjust_ref_count :Failed to set AFE input mux - errCode [%d]!\n", __func__, status); return status; } } /* call afe block to set video inputs */ status = cx231xx_afe_set_input_mux(dev, input); if (status < 0) { dev_err(dev->dev, "%s: set_input_mux :Failed to set AFE input mux - errCode [%d]!\n", __func__, status); return status; } switch (pin_type) { case CX231XX_VMUX_COMPOSITE1: status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= (0 << 13) | (1 << 4); value &= ~(1 << 5); /* set [24:23] [22:15] to 0 */ value &= (~(0x1ff8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 */ value |= 0x1000000; status = vid_blk_write_word(dev, AFE_CTRL, value); status = vid_blk_read_word(dev, OUT_CTRL1, &value); value |= (1 << 7); status = vid_blk_write_word(dev, OUT_CTRL1, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n", __func__, status); return status; } /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = 
vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); break; case CX231XX_VMUX_SVIDEO: /* Disable the use of DIF */ status = vid_blk_read_word(dev, AFE_CTRL, &value); /* set [24:23] [22:15] to 0 */ value &= (~(0x1ff8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 DCR_BYP_CH2[4:4] = 1; */ value |= 0x1000010; status = vid_blk_write_word(dev, AFE_CTRL, value); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n", __func__, status); return status; } /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set YC input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_YC_1)); /* Chroma to ADC2 */ status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= FLD_CHROMA_IN_SEL; /* set the chroma in select */ /* Clear VGA_SEL_CH2 and VGA_SEL_CH3 (bits 7 and 8) This sets them to use video rather than audio. Only one of the two will be in use. 
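(The tuner/TV path later in this function does the opposite and sets FLD_VGA_SEL_CH2 | FLD_VGA_SEL_CH3 so that channel 2/3 gain is used for audio control.)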
*/ value &= ~(FLD_VGA_SEL_CH2 | FLD_VGA_SEL_CH3); status = vid_blk_write_word(dev, AFE_CTRL, value); status = cx231xx_afe_set_mode(dev, AFE_MODE_BASEBAND); break; case CX231XX_VMUX_TELEVISION: case CX231XX_VMUX_CABLE: default: /* TODO: Test if this is also needed for xc2028/xc3028 */ if (dev->board.tuner_type == TUNER_XC5000) { /* Disable the use of DIF */ status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= (0 << 13) | (1 << 4); value &= ~(1 << 5); /* set [24:23] [22:15] to 0 */ value &= (~(0x1FF8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 */ value |= 0x1000000; status = vid_blk_write_word(dev, AFE_CTRL, value); status = vid_blk_read_word(dev, OUT_CTRL1, &value); value |= (1 << 7); status = vid_blk_write_word(dev, OUT_CTRL1, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n", __func__, status); return status; } /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); } else { /* Enable the DIF for the tuner */ /* Reinitialize the DIF */ status = cx231xx_dif_set_standard(dev, dev->norm); if (status < 0) { dev_err(dev->dev, "%s: cx231xx_dif set to By pass mode- errCode [%d]!\n", __func__, status); return status; } /* Make sure bypass is cleared */ status = vid_blk_read_word(dev, DIF_MISC_CTRL, &value); /* Clear the bypass bit */ value &= ~FLD_DIF_DIF_BYPASS; /* Enable the use of the DIF block */ status = vid_blk_write_word(dev, DIF_MISC_CTRL, value); /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* Disable the VBI_GATE_EN */ value &= ~FLD_VBI_GATE_EN; /* Enable the auto-VGA enable, AGC, and set the skip count to 2 */ value |= FLD_VGA_AUTO_EN | FLD_AGC_AUTO_EN | 0x00200000; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Wait until AGC locks up */ msleep(1); /* Disable the auto-VGA enable AGC */ value &= ~(FLD_VGA_AUTO_EN); /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Enable Polaris B0 AGC output */ status = vid_blk_read_word(dev, PIN_CTRL, &value); value |= (FLD_OEF_AGC_RF) | (FLD_OEF_AGC_IFVGA) | (FLD_OEF_AGC_IF); status = vid_blk_write_word(dev, PIN_CTRL, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); /* Set some bits in AFE_CTRL so that channel 2 or 3 * is ready to receive audio */ /* 
Clear clamp for channels 2 and 3 (bit 16-17) */ /* Clear droop comp (bit 19-20) */ /* Set VGA_SEL (for audio control) (bit 7-8) */ status = vid_blk_read_word(dev, AFE_CTRL, &value); /*Set Func mode:01-DIF 10-baseband 11-YUV*/ value &= (~(FLD_FUNC_MODE)); value |= 0x800000; value |= FLD_VGA_SEL_CH3 | FLD_VGA_SEL_CH2; status = vid_blk_write_word(dev, AFE_CTRL, value); if (dev->tuner_type == TUNER_NXP_TDA18271) { status = vid_blk_read_word(dev, PIN_CTRL, &value); status = vid_blk_write_word(dev, PIN_CTRL, (value & 0xFFFFFFEF)); } break; } break; } /* Set raw VBI mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_VBIHACTRAW_EN, cx231xx_set_field(FLD_VBIHACTRAW_EN, 1)); status = vid_blk_read_word(dev, OUT_CTRL1, &value); if (value & 0x02) { value |= (1 << 19); status = vid_blk_write_word(dev, OUT_CTRL1, value); } return status; } void cx231xx_enable656(struct cx231xx *dev) { u8 temp = 0; /*enable TS1 data[0:7] as output to export 656*/ vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF); /*enable TS1 clock as output to export 656*/ vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp); temp = temp|0x04; vid_blk_write_byte(dev, TS1_PIN_CTL1, temp); } EXPORT_SYMBOL_GPL(cx231xx_enable656); void cx231xx_disable656(struct cx231xx *dev) { u8 temp = 0; vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00); vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp); temp = temp&0xFB; vid_blk_write_byte(dev, TS1_PIN_CTL1, temp); } EXPORT_SYMBOL_GPL(cx231xx_disable656); /* * Handle any video-mode specific overrides that are different * on a per video standards basis after touching the MODE_CTRL * register which resets many values for autodetect */ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev) { int status = 0; dev_dbg(dev->dev, "%s: 0x%x\n", __func__, (unsigned int)dev->norm); /* Change the DFE_CTRL3 bp_percent to fix flagging */ status = vid_blk_write_word(dev, DFE_CTRL3, 0xCD3F0280); if (dev->norm & (V4L2_STD_NTSC | V4L2_STD_PAL_M)) { dev_dbg(dev->dev, "%s: NTSC\n", __func__); /* Move the close caption lines out of active video, adjust the active video start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x18); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, 0x1E7000); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, 0x1C000000); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x79)); } else if (dev->norm & V4L2_STD_SECAM) { dev_dbg(dev->dev, "%s: SECAM\n", __func__); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x20); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, cx231xx_set_field (FLD_VACTIVE_CNT, 0x244)); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, cx231xx_set_field (FLD_V656BLANK_CNT, 0x24)); /* Adjust the active video horizontal start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x85)); } else { dev_dbg(dev->dev, "%s: PAL\n", __func__); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x20); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, 
cx231xx_set_field (FLD_VACTIVE_CNT, 0x244)); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, cx231xx_set_field (FLD_V656BLANK_CNT, 0x24)); /* Adjust the active video horizontal start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x85)); } return status; } int cx231xx_unmute_audio(struct cx231xx *dev) { return vid_blk_write_byte(dev, PATH1_VOL_CTL, 0x24); } EXPORT_SYMBOL_GPL(cx231xx_unmute_audio); static int stopAudioFirmware(struct cx231xx *dev) { return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x03); } static int restartAudioFirmware(struct cx231xx *dev) { return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x13); } int cx231xx_set_audio_input(struct cx231xx *dev, u8 input) { int status = 0; enum AUDIO_INPUT ainput = AUDIO_INPUT_LINE; switch (INPUT(input)->amux) { case CX231XX_AMUX_VIDEO: ainput = AUDIO_INPUT_TUNER_TV; break; case CX231XX_AMUX_LINE_IN: status = cx231xx_i2s_blk_set_audio_input(dev, input); ainput = AUDIO_INPUT_LINE; break; default: break; } status = cx231xx_set_audio_decoder_input(dev, ainput); return status; } int cx231xx_set_audio_decoder_input(struct cx231xx *dev, enum AUDIO_INPUT audio_input) { u32 dwval; int status; u8 gen_ctrl; u32 value = 0; /* Put it in soft reset */ status = vid_blk_read_byte(dev, GENERAL_CTL, &gen_ctrl); gen_ctrl |= 1; status = vid_blk_write_byte(dev, GENERAL_CTL, gen_ctrl); switch (audio_input) { case AUDIO_INPUT_LINE: /* setup AUD_IO control from Merlin paralle output */ value = cx231xx_set_field(FLD_AUD_CHAN1_SRC, AUD_CHAN_SRC_PARALLEL); status = vid_blk_write_word(dev, AUD_IO_CTRL, value); /* setup input to Merlin, SRC2 connect to AC97 bypass upsample-by-2, slave mode, sony mode, left justify adr 091c, dat 01000000 */ status = vid_blk_read_word(dev, AC97_CTL, &dwval); status = vid_blk_write_word(dev, AC97_CTL, (dwval | FLD_AC97_UP2X_BYPASS)); /* select the parallel1 and SRC3 */ status = vid_blk_write_word(dev, BAND_OUT_SEL, cx231xx_set_field(FLD_SRC3_IN_SEL, 0x0) | cx231xx_set_field(FLD_SRC3_CLK_SEL, 0x0) | cx231xx_set_field(FLD_PARALLEL1_SRC_SEL, 0x0)); /* unmute all, AC97 in, independence mode adr 08d0, data 0x00063073 */ status = vid_blk_write_word(dev, DL_CTL, 0x3000001); status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063073); /* set AVC maximum threshold, adr 08d4, dat ffff0024 */ status = vid_blk_read_word(dev, PATH1_VOL_CTL, &dwval); status = vid_blk_write_word(dev, PATH1_VOL_CTL, (dwval | FLD_PATH1_AVC_THRESHOLD)); /* set SC maximum threshold, adr 08ec, dat ffffb3a3 */ status = vid_blk_read_word(dev, PATH1_SC_CTL, &dwval); status = vid_blk_write_word(dev, PATH1_SC_CTL, (dwval | FLD_PATH1_SC_THRESHOLD)); break; case AUDIO_INPUT_TUNER_TV: default: status = stopAudioFirmware(dev); /* Setup SRC sources and clocks */ status = vid_blk_write_word(dev, BAND_OUT_SEL, cx231xx_set_field(FLD_SRC6_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC6_CLK_SEL, 0x01) | cx231xx_set_field(FLD_SRC5_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC5_CLK_SEL, 0x02) | cx231xx_set_field(FLD_SRC4_IN_SEL, 0x02) | cx231xx_set_field(FLD_SRC4_CLK_SEL, 0x03) | cx231xx_set_field(FLD_SRC3_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC3_CLK_SEL, 0x00) | cx231xx_set_field(FLD_BASEBAND_BYPASS_CTL, 0x00) | cx231xx_set_field(FLD_AC97_SRC_SEL, 0x03) | cx231xx_set_field(FLD_I2S_SRC_SEL, 0x00) | cx231xx_set_field(FLD_PARALLEL2_SRC_SEL, 0x02) | cx231xx_set_field(FLD_PARALLEL1_SRC_SEL, 0x01)); /* Setup the AUD_IO control */ status = 
vid_blk_write_word(dev, AUD_IO_CTRL, cx231xx_set_field(FLD_I2S_PORT_DIR, 0x00) | cx231xx_set_field(FLD_I2S_OUT_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN3_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN2_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN1_SRC, 0x03)); status = vid_blk_write_word(dev, PATH1_CTL1, 0x1F063870); /* setAudioStandard(_audio_standard); */ status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063870); status = restartAudioFirmware(dev); switch (dev->board.tuner_type) { case TUNER_XC5000: /* SIF passthrough at 28.6363 MHz sample rate */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, CHIP_CTRL, FLD_SIF_EN, cx231xx_set_field(FLD_SIF_EN, 1)); break; case TUNER_NXP_TDA18271: /* Normal mode: SIF passthrough at 14.32 MHz */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, CHIP_CTRL, FLD_SIF_EN, cx231xx_set_field(FLD_SIF_EN, 0)); break; default: switch (dev->model) { /* i2c device tuners */ case CX231XX_BOARD_HAUPPAUGE_930C_HD_1114xx: case CX231XX_BOARD_HAUPPAUGE_935C: case CX231XX_BOARD_HAUPPAUGE_955Q: case CX231XX_BOARD_HAUPPAUGE_975: case CX231XX_BOARD_EVROMEDIA_FULL_HYBRID_FULLHD: /* TODO: Normal mode: SIF passthrough at 14.32 MHz?? */ break; default: /* This is just a casual suggestion to people adding new boards in case they use a tuner type we don't currently know about */ dev_info(dev->dev, "Unknown tuner type configuring SIF"); break; } } break; case AUDIO_INPUT_TUNER_FM: /* use SIF for FM radio setupFM(); setAudioStandard(_audio_standard); */ break; case AUDIO_INPUT_MUTE: status = vid_blk_write_word(dev, PATH1_CTL1, 0x1F011012); break; } /* Take it out of soft reset */ status = vid_blk_read_byte(dev, GENERAL_CTL, &gen_ctrl); gen_ctrl &= ~1; status = vid_blk_write_byte(dev, GENERAL_CTL, gen_ctrl); return status; } /****************************************************************************** * C H I P Specific C O N T R O L functions * ******************************************************************************/ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev) { u32 value; int status = 0; status = vid_blk_read_word(dev, PIN_CTRL, &value); value |= (~dev->board.ctl_pin_status_mask); status = vid_blk_write_word(dev, PIN_CTRL, value); return status; } int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, u8 analog_or_digital) { int status; /* first set the direction to output */ status = cx231xx_set_gpio_direction(dev, dev->board. agc_analog_digital_select_gpio, 1); /* 0 - demod ; 1 - Analog mode */ status = cx231xx_set_gpio_value(dev, dev->board.agc_analog_digital_select_gpio, analog_or_digital); if (status < 0) return status; return 0; } int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3) { u8 value[4] = { 0, 0, 0, 0 }; int status = 0; bool current_is_port_3; /* * Should this code check dev->port_3_switch_enabled first * to skip unnecessary reading of the register? * If yes, the flag dev->port_3_switch_enabled must be initialized * correctly. */ status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); if (status < 0) return status; current_is_port_3 = value[0] & I2C_DEMOD_EN ? 
true : false; /* Just return, if already using the right port */ if (current_is_port_3 == is_port_3) return 0; if (is_port_3) value[0] |= I2C_DEMOD_EN; else value[0] &= ~I2C_DEMOD_EN; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); /* remember status of the switch for usage in is_tuner */ if (status >= 0) dev->port_3_switch_enabled = is_port_3; return status; } EXPORT_SYMBOL_GPL(cx231xx_enable_i2c_port_3); void update_HH_register_after_set_DIF(struct cx231xx *dev) { /* u8 status = 0; u32 value = 0; vid_blk_write_word(dev, PIN_CTRL, 0xA0FFF82F); vid_blk_write_word(dev, DIF_MISC_CTRL, 0x0A203F11); vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1BEFBF06); status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390); status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); */ } #if 0 static void cx231xx_dump_SC_reg(struct cx231xx *dev) { u8 value[4] = { 0, 0, 0, 0 }; dev_dbg(dev->dev, "%s!\n", __func__); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG, value, 4); 
dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); dev_dbg(dev->dev, "reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); } #endif void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev) { u8 value = 0; afe_read_byte(dev, ADC_STATUS2_CH3, &value); value = (value & 0xFE)|0x01; afe_write_byte(dev, ADC_STATUS2_CH3, value); afe_read_byte(dev, ADC_STATUS2_CH3, &value); value = (value & 0xFE)|0x00; afe_write_byte(dev, ADC_STATUS2_CH3, value); /* config colibri to lo-if mode FIXME: ntf_mode = 2'b00 by default. But set 0x1 would reduce the diff IF input by half, for low-if agc defect */ afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value); value = (value & 0xFC)|0x00; afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value); afe_read_byte(dev, ADC_INPUT_CH3, &value); value = (value & 0xF9)|0x02; afe_write_byte(dev, ADC_INPUT_CH3, value); afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value); value = (value & 0xFB)|0x04; afe_write_byte(dev, ADC_FB_FRCRST_CH3, value); afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value); value = (value & 0xFC)|0x03; afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value); value = (value & 0xFB)|0x04; afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value); value = (value & 0xF8)|0x06; afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value); value = (value & 0x8F)|0x40; afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value); afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value); value = (value & 0xDF)|0x20; afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value); } void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode) { u32 colibri_carrier_offset = 0; u32 func_mode = 0x01; /* Device has a DIF if this function is called */ u32 standard = 0; u8 value[4] = { 0, 0, 0, 0 }; dev_dbg(dev->dev, "Enter cx231xx_set_Colibri_For_LowIF()\n"); value[0] = (u8) 0x6F; value[1] = (u8) 0x6F; value[2] = (u8) 0x6F; value[3] = (u8) 0x6F; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); /*Set colibri for low IF*/ cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF); /* Set C2HH for low IF operation.*/ standard = dev->norm; cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode, func_mode, standard); /* Get colibri offsets.*/ colibri_carrier_offset = cx231xx_Get_Colibri_CarrierOffset(mode, standard); dev_dbg(dev->dev, "colibri_carrier_offset=%d, standard=0x%x\n", colibri_carrier_offset, standard); /* Set the band Pass filter for DIF*/ cx231xx_set_DIF_bandpass(dev, (if_freq+colibri_carrier_offset), spectral_invert, mode); } u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standerd) { u32 colibri_carrier_offset = 0; if (mode == TUNER_MODE_FM_RADIO) { colibri_carrier_offset = 1100000; } else if (standerd & (V4L2_STD_MN | V4L2_STD_NTSC_M_JP)) { colibri_carrier_offset = 4832000; /*4.83MHz */ } else if (standerd & (V4L2_STD_PAL_B | V4L2_STD_PAL_G)) { colibri_carrier_offset = 2700000; /*2.70MHz */ } else if (standerd & (V4L2_STD_PAL_D | V4L2_STD_PAL_I | V4L2_STD_SECAM)) { 
colibri_carrier_offset = 2100000; /*2.10MHz */ } return colibri_carrier_offset; } void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode) { unsigned long pll_freq_word; u32 dif_misc_ctrl_value = 0; u64 pll_freq_u64 = 0; u32 i = 0; dev_dbg(dev->dev, "if_freq=%d;spectral_invert=0x%x;mode=0x%x\n", if_freq, spectral_invert, mode); if (mode == TUNER_MODE_FM_RADIO) { pll_freq_word = 0x905A1CAC; vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word); } else /*KSPROPERTY_TUNER_MODE_TV*/{ /* Calculate the PLL frequency word based on the adjusted if_freq*/ pll_freq_word = if_freq; pll_freq_u64 = (u64)pll_freq_word << 28L; do_div(pll_freq_u64, 50000000); pll_freq_word = (u32)pll_freq_u64; /*pll_freq_word = 0x3463497;*/ vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word); if (spectral_invert) { if_freq -= 400000; /* Enable Spectral Invert*/ vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value = dif_misc_ctrl_value | 0x00200000; vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } else { if_freq += 400000; /* Disable Spectral Invert*/ vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value = dif_misc_ctrl_value & 0xFFDFFFFF; vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } if_freq = (if_freq / 100000) * 100000; if (if_freq < 3000000) if_freq = 3000000; if (if_freq > 16000000) if_freq = 16000000; } dev_dbg(dev->dev, "Enter IF=%zu\n", ARRAY_SIZE(Dif_set_array)); for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) { if (Dif_set_array[i].if_freq == if_freq) { vid_blk_write_word(dev, Dif_set_array[i].register_address, Dif_set_array[i].value); } } } /****************************************************************************** * D I F - B L O C K C O N T R O L functions * ******************************************************************************/ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode, u32 function_mode, u32 standard) { int status = 0; if (mode == V4L2_TUNER_RADIO) { /* C2HH */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xFF); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } else if (standard != DIF_USE_BASEBAND) { if (standard & V4L2_STD_MN) { /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xb); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); /* 0x124, AUD_CHAN1_SRC = 0x3 */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AUD_IO_CTRL, 0, 31, 0x00000003); } else if ((standard == V4L2_STD_PAL_I) | (standard & V4L2_STD_PAL_D) | (standard & V4L2_STD_SECAM)) { /* C2HH setup */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, 
AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xF); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } else { /* default PAL BG */ /* C2HH setup */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xE); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } } return status; } int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard) { int status = 0; u32 dif_misc_ctrl_value = 0; u32 func_mode = 0; dev_dbg(dev->dev, "%s: setStandard to %x\n", __func__, standard); status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); if (standard != DIF_USE_BASEBAND) dev->norm = standard; switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: case CX231XX_BOARD_CNXT_VIDEO_GRABBER: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_OTG102: func_mode = 0x03; break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: func_mode = 0x01; break; default: func_mode = 0x01; } status = cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode, func_mode, standard); if (standard == DIF_USE_BASEBAND) { /* base band */ /* There is a different SRC_PHASE_INC value for baseband vs. 
DIF */ status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0xDF7DF83); status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value |= FLD_DIF_DIF_BYPASS; status = vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } else if (standard & V4L2_STD_PAL_D) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3934EA); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11; } else if (standard & V4L2_STD_PAL_I) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x5F39A934); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = 
cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a033F11; } else if (standard & V4L2_STD_PAL_M) { /* improved Low Frequency Phase Noise */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0xFF01FF0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xbd038c85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1db4640a); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C1380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x72500800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x012c405d); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_SOFT_RST_CTRL_REVB, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3A0A3F10; } else if (standard & (V4L2_STD_PAL_N | V4L2_STD_PAL_Nc)) { /* improved Low Frequency Phase Noise */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0xFF01FF0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xbd038c85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1db4640a); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C1380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x72500800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x012c405d); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_SOFT_RST_CTRL_REVB, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value = 0x3A093F10; } else if (standard & (V4L2_STD_SECAM_B | V4L2_STD_SECAM_D | V4L2_STD_SECAM_G | V4L2_STD_SECAM_K | V4L2_STD_SECAM_K1)) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x888C0380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xe0262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xc2171700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xc2262600); status = 
cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530ec); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0xf4000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11; } else if (standard & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)) { /* Is it SECAM_L1? */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x888C0380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xe0262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xc2171700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xc2262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530ec); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0xf2560000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11; } else if (standard & V4L2_STD_NTSC_M) { /* V4L2_STD_NTSC_M (75 IRE Setup) Or V4L2_STD_NTSC_M_JP (Japan, 0 IRE Setup) */ /* For NTSC the centre frequency of video coming out of sidewinder is around 7.1MHz or 3.6MHz depending on the spectral inversion. 
so for a non spectrally inverted channel the pll freq word is 0x03420c49 */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0x6503BC0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xBD038C85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1DB4640A); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C0380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x04000800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x01296e1f); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_AGC_CTRL_IF, 0xC2262600); status = vid_blk_write_word(dev, DIF_AGC_CTRL_INT, 0xC2262600); status = vid_blk_write_word(dev, DIF_AGC_CTRL_RF, 0xC2262600); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a003F10; } else { /* default PAL BG */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530EC); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00A653A8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a013F11; } /* The AGC values should be the same for all standards, AUD_SRC_SEL[19] should always be disabled */ dif_misc_ctrl_value &= ~FLD_DIF_AUD_SRC_SEL; /* It is still possible to get Set Standard calls even when we are in FM mode. This is done to override the value for FM. 
*/ if (dev->active_mode == V4L2_TUNER_RADIO) dif_misc_ctrl_value = 0x7a080000; /* Write the calculated value for misc ontrol register */ status = vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); return status; } int cx231xx_tuner_pre_channel_change(struct cx231xx *dev) { int status = 0; u32 dwval; /* Set the RF and IF k_agc values to 3 */ status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval); dwval &= ~(FLD_DIF_K_AGC_RF | FLD_DIF_K_AGC_IF); dwval |= 0x33000000; status = vid_blk_write_word(dev, DIF_AGC_IF_REF, dwval); return status; } int cx231xx_tuner_post_channel_change(struct cx231xx *dev) { int status = 0; u32 dwval; dev_dbg(dev->dev, "%s: dev->tuner_type =0%d\n", __func__, dev->tuner_type); /* Set the RF and IF k_agc values to 4 for PAL/NTSC and 8 for * SECAM L/B/D standards */ status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval); dwval &= ~(FLD_DIF_K_AGC_RF | FLD_DIF_K_AGC_IF); if (dev->norm & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_B | V4L2_STD_SECAM_D)) { if (dev->tuner_type == TUNER_NXP_TDA18271) { dwval &= ~FLD_DIF_IF_REF; dwval |= 0x88000300; } else dwval |= 0x88000000; } else { if (dev->tuner_type == TUNER_NXP_TDA18271) { dwval &= ~FLD_DIF_IF_REF; dwval |= 0xCC000300; } else dwval |= 0x44000000; } status = vid_blk_write_word(dev, DIF_AGC_IF_REF, dwval); return status == sizeof(dwval) ? 0 : -EIO; } /****************************************************************************** * I 2 S - B L O C K C O N T R O L functions * ******************************************************************************/ int cx231xx_i2s_blk_initialize(struct cx231xx *dev) { int status = 0; u32 value; status = cx231xx_read_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, &value, 1); /* enables clock to delta-sigma and decimation filter */ value |= 0x80; status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, value, 1); /* power up all channel */ status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); return status; } int cx231xx_i2s_blk_update_power_control(struct cx231xx *dev, enum AV_MODE avmode) { int status = 0; u32 value = 0; if (avmode != POLARIS_AVMODE_ENXTERNAL_AV) { status = cx231xx_read_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, &value, 1); value |= 0xfe; status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, value, 1); } else { status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); } return status; } /* set i2s_blk for audio input types */ int cx231xx_i2s_blk_set_audio_input(struct cx231xx *dev, u8 audio_input) { int status = 0; switch (audio_input) { case CX231XX_AMUX_LINE_IN: status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, 0x80, 1); break; case CX231XX_AMUX_VIDEO: default: break; } dev->ctl_ainput = audio_input; return status; } /****************************************************************************** * P O W E R C O N T R O L functions * ******************************************************************************/ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode) { u8 value[4] = { 0, 0, 0, 0 }; u32 tmp = 0; int status = 0; if (dev->power_mode != mode) dev->power_mode = mode; else { dev_dbg(dev->dev, "%s: mode = %d, No Change req.\n", __func__, mode); return 0; } status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((__le32 *) value)); 
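/*
 * At this point "tmp" holds the current PWR_CTL_EN value read from the
 * device.  Each case below updates the power-enable bits in tmp for the
 * requested mode and writes the 32-bit word back one byte at a time
 * (LSB first) via cx231xx_write_ctrl_reg(), sleeping PWR_SLEEP_INTERVAL
 * between successive register writes.
 */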
switch (mode) { case POLARIS_AVMODE_ENXTERNAL_AV: tmp &= (~PWR_MODE_MASK); tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); tmp |= POLARIS_AVMODE_ENXTERNAL_AV; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); /* reset state of xceive tuner */ dev->xc_fw_load_done = 0; break; case POLARIS_AVMODE_ANALOGT_TV: tmp |= PWR_DEMOD_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); if (!(tmp & PWR_TUNER_EN)) { tmp |= (PWR_TUNER_EN); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_AV_EN)) { tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_ISO_EN)) { tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & POLARIS_AVMODE_ANALOGT_TV)) { tmp |= POLARIS_AVMODE_ANALOGT_TV; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (dev->board.tuner_type != TUNER_ABSENT) { /* reset the Tuner */ if (dev->board.tuner_gpio) cx231xx_gpio_set(dev, dev->board.tuner_gpio); if (dev->cx231xx_reset_analog_tuner) dev->cx231xx_reset_analog_tuner(dev); } break; case POLARIS_AVMODE_DIGITAL: if (!(tmp & PWR_TUNER_EN)) { tmp |= (PWR_TUNER_EN); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_AV_EN)) { tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_ISO_EN)) { tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } tmp &= (~PWR_AV_MODE); tmp |= POLARIS_AVMODE_DIGITAL; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); if (!(tmp & PWR_DEMOD_EN)) { tmp |= PWR_DEMOD_EN; value[0] = (u8) tmp; 
value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (dev->board.tuner_type != TUNER_ABSENT) { /* reset the Tuner */ if (dev->board.tuner_gpio) cx231xx_gpio_set(dev, dev->board.tuner_gpio); if (dev->cx231xx_reset_analog_tuner) dev->cx231xx_reset_analog_tuner(dev); } break; default: break; } msleep(PWR_SLEEP_INTERVAL); /* For power saving, only enable Pwr_resetout_n when digital TV is selected. */ if (mode == POLARIS_AVMODE_DIGITAL) { tmp |= PWR_RESETOUT_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } /* update power control for afe */ status = cx231xx_afe_update_power_control(dev, mode); /* update power control for i2s_blk */ status = cx231xx_i2s_blk_update_power_control(dev, mode); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); return status; } /****************************************************************************** * S T R E A M C O N T R O L functions * ******************************************************************************/ int cx231xx_start_stream(struct cx231xx *dev, u32 ep_mask) { u8 value[4] = { 0x0, 0x0, 0x0, 0x0 }; u32 tmp = 0; int status = 0; dev_dbg(dev->dev, "%s: ep_mask = %x\n", __func__, ep_mask); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((__le32 *) value)); tmp |= ep_mask; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, EP_MODE_SET, value, 4); return status; } int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask) { u8 value[4] = { 0x0, 0x0, 0x0, 0x0 }; u32 tmp = 0; int status = 0; dev_dbg(dev->dev, "%s: ep_mask = %x\n", __func__, ep_mask); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((__le32 *) value)); tmp &= (~ep_mask); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, EP_MODE_SET, value, 4); return status; } int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type) { int status = 0; u32 value = 0; u8 val[4] = { 0, 0, 0, 0 }; if (dev->udev->speed == USB_SPEED_HIGH) { switch (media_type) { case Audio: dev_dbg(dev->dev, "%s: Audio enter HANC\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x9300); break; case Vbi: dev_dbg(dev->dev, "%s: set vanc registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x300); break; case Sliced_cc: dev_dbg(dev->dev, "%s: set hanc registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x1300); break; case Raw_Video: dev_dbg(dev->dev, "%s: set video registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100); break; case TS1_serial_mode: dev_dbg(dev->dev, "%s: set ts1 registers", __func__); if (dev->board.has_417) { dev_dbg(dev->dev, "%s: MPEG\n", __func__); value &= 0xFFFFFFFC; value |= 0x3; status = cx231xx_mode_register(dev, TS_MODE_REG, value); val[0] = 0x04; val[1] = 0xA3; val[2] = 0x3B; val[3] = 0x00; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_CFG_REG, val, 4); val[0] = 0x00; val[1] = 0x08; 
val[2] = 0x00; val[3] = 0x08; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_LENGTH_REG, val, 4); } else { dev_dbg(dev->dev, "%s: BDA\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101); status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010); } break; case TS1_parallel_mode: dev_dbg(dev->dev, "%s: set ts1 parallel mode registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100); status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x400); break; } } else { status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101); } return status; } int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type) { int rc = -1; u32 ep_mask = -1; struct pcb_config *pcb_config; /* get EP for media type */ pcb_config = (struct pcb_config *)&dev->current_pcb_config; if (pcb_config->config_num) { switch (media_type) { case Raw_Video: ep_mask = ENABLE_EP4; /* ep4 [00:1000] */ break; case Audio: ep_mask = ENABLE_EP3; /* ep3 [00:0100] */ break; case Vbi: ep_mask = ENABLE_EP5; /* ep5 [01:0000] */ break; case Sliced_cc: ep_mask = ENABLE_EP6; /* ep6 [10:0000] */ break; case TS1_serial_mode: case TS1_parallel_mode: ep_mask = ENABLE_EP1; /* ep1 [00:0001] */ break; case TS2: ep_mask = ENABLE_EP2; /* ep2 [00:0010] */ break; } } if (start) { rc = cx231xx_initialize_stream_xfer(dev, media_type); if (rc < 0) return rc; /* enable video capture */ if (ep_mask > 0) rc = cx231xx_start_stream(dev, ep_mask); } else { /* disable video capture */ if (ep_mask > 0) rc = cx231xx_stop_stream(dev, ep_mask); } return rc; } EXPORT_SYMBOL_GPL(cx231xx_capture_start); /***************************************************************************** * G P I O B I T control functions * ******************************************************************************/ static int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 gpio_val) { int status = 0; gpio_val = (__force u32)cpu_to_le32(gpio_val); status = cx231xx_send_gpio_cmd(dev, gpio_bit, (u8 *)&gpio_val, 4, 0, 0); return status; } static int cx231xx_get_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 *gpio_val) { __le32 tmp; int status = 0; status = cx231xx_send_gpio_cmd(dev, gpio_bit, (u8 *)&tmp, 4, 0, 1); *gpio_val = le32_to_cpu(tmp); return status; } /* * cx231xx_set_gpio_direction * Sets the direction of the GPIO pin to input or output * * Parameters : * pin_number : The GPIO Pin number to program the direction for * from 0 to 31 * pin_value : The Direction of the GPIO Pin under reference. * 0 = Input direction * 1 = Output direction */ int cx231xx_set_gpio_direction(struct cx231xx *dev, int pin_number, int pin_value) { int status = 0; u32 value = 0; /* Check for valid pin_number - if 32 , bail out */ if (pin_number >= 32) return -EINVAL; /* input */ if (pin_value == 0) value = dev->gpio_dir & (~(1 << pin_number)); /* clear */ else value = dev->gpio_dir | (1 << pin_number); status = cx231xx_set_gpio_bit(dev, value, dev->gpio_val); /* cache the value for future */ dev->gpio_dir = value; return status; } /* * cx231xx_set_gpio_value * Sets the value of the GPIO pin to Logic high or low. The Pin under * reference should ALREADY BE SET IN OUTPUT MODE !!!!!!!!! * * Parameters : * pin_number : The GPIO Pin number to program the direction for * pin_value : The value of the GPIO Pin under reference. 
* 0 = set it to 0 * 1 = set it to 1 */ int cx231xx_set_gpio_value(struct cx231xx *dev, int pin_number, int pin_value) { int status = 0; u32 value = 0; /* Check for valid pin_number - if 0xFF , bail out */ if (pin_number >= 32) return -EINVAL; /* first do a sanity check - if the Pin is not output, make it output */ if ((dev->gpio_dir & (1 << pin_number)) == 0x00) { /* It was in input mode */ value = dev->gpio_dir | (1 << pin_number); dev->gpio_dir = value; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); } if (pin_value == 0) value = dev->gpio_val & (~(1 << pin_number)); else value = dev->gpio_val | (1 << pin_number); /* store the value */ dev->gpio_val = value; /* toggle bit0 of GP_IO */ status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } /***************************************************************************** * G P I O I2C related functions * ******************************************************************************/ int cx231xx_gpio_i2c_start(struct cx231xx *dev) { int status = 0; /* set SCL to output 1 ; set SDA to output 1 */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; return status; } int cx231xx_gpio_i2c_end(struct cx231xx *dev) { int status = 0; /* set SCL to output 0; set SDA to output 0 */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to input ,release SCL cable control set SDA to input ,release SDA cable control */ dev->gpio_dir &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; return status; } int cx231xx_gpio_i2c_write_byte(struct cx231xx *dev, u8 data) { int status = 0; u8 i; /* set SCL to output ; set SDA to output */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; for (i = 0; i < 8; i++) { if (((data << i) & 0x80) == 0) { /* set SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set 
SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); } else { /* set SCL to output 0; set SDA to output 1 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to output 1 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 0; set SDA to output 1 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); } } return status; } int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 *buf) { u8 value = 0; int status = 0; u32 gpio_logic_value = 0; u8 i; /* read byte */ for (i = 0; i < 8; i++) { /* send write I2c addr */ /* set SCL to output 0; set SDA to input */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to input */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* get SDA data bit */ gpio_logic_value = dev->gpio_val; status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); if ((dev->gpio_val & (1 << dev->board.tuner_sda_gpio)) != 0) value |= (1 << (8 - i - 1)); dev->gpio_val = gpio_logic_value; } /* set SCL to output 0,finish the read latest SCL signal. !!!set SDA to input, never to modify SDA direction at the same times */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* store the value */ *buf = value & 0xff; return status; } int cx231xx_gpio_i2c_read_ack(struct cx231xx *dev) { int status = 0; u32 gpio_logic_value = 0; int nCnt = 10; int nInit = nCnt; /* clock stretch; set SCL to input; set SDA to input; get SCL value till SCL = 1 */ dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); dev->gpio_dir &= ~(1 << dev->board.tuner_scl_gpio); gpio_logic_value = dev->gpio_val; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); do { msleep(2); status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); nCnt--; } while (((dev->gpio_val & (1 << dev->board.tuner_scl_gpio)) == 0) && (nCnt > 0)); if (nCnt == 0) dev_dbg(dev->dev, "No ACK after %d msec -GPIO I2C failed!", nInit * 10); /* * readAck * through clock stretch, slave has given a SCL signal, * so the SDA data can be directly read. */ status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); if ((dev->gpio_val & 1 << dev->board.tuner_sda_gpio) == 0) { dev->gpio_val = gpio_logic_value; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = 0; } else { dev->gpio_val = gpio_logic_value; dev->gpio_val |= (1 << dev->board.tuner_sda_gpio); } /* read SDA end, set the SCL to output 0, after this operation, SDA direction can be changed. 
*/ dev->gpio_val = gpio_logic_value; dev->gpio_dir |= (1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } int cx231xx_gpio_i2c_write_ack(struct cx231xx *dev) { int status = 0; /* set SDA to output */ dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 0 (output); set SDA = 0 (output) */ dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 1 (output); set SDA = 0 (output) */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 0 (output); set SDA = 0 (output) */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SDA to input,and then the slave will read data from SDA. */ dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } int cx231xx_gpio_i2c_write_nak(struct cx231xx *dev) { int status = 0; /* set scl to output ; set sda to input */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set scl to output 0; set sda to input */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set scl to output 1; set sda to input */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } /***************************************************************************** * G P I O I2C related functions * ******************************************************************************/ /* cx231xx_gpio_i2c_read * Function to read data from gpio based I2C interface */ int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len) { int status = 0; int i = 0; /* get the lock */ mutex_lock(&dev->gpio_i2c_lock); /* start */ status = cx231xx_gpio_i2c_start(dev); /* write dev_addr */ status = cx231xx_gpio_i2c_write_byte(dev, (dev_addr << 1) + 1); /* readAck */ status = cx231xx_gpio_i2c_read_ack(dev); /* read data */ for (i = 0; i < len; i++) { /* read data */ buf[i] = 0; status = cx231xx_gpio_i2c_read_byte(dev, &buf[i]); if ((i + 1) != len) { /* only do write ack if we more length */ status = cx231xx_gpio_i2c_write_ack(dev); } } /* write NAK - inform reads are complete */ status = cx231xx_gpio_i2c_write_nak(dev); /* write end */ status = cx231xx_gpio_i2c_end(dev); /* release the lock */ mutex_unlock(&dev->gpio_i2c_lock); return status; } /* cx231xx_gpio_i2c_write * Function to write data to gpio based I2C interface */ int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len) { int i = 0; /* get the lock */ mutex_lock(&dev->gpio_i2c_lock); /* start */ cx231xx_gpio_i2c_start(dev); /* write dev_addr */ cx231xx_gpio_i2c_write_byte(dev, dev_addr << 1); /* read Ack */ cx231xx_gpio_i2c_read_ack(dev); for (i = 0; i < len; i++) { /* Write data */ cx231xx_gpio_i2c_write_byte(dev, buf[i]); /* read Ack */ cx231xx_gpio_i2c_read_ack(dev); } /* write End */ cx231xx_gpio_i2c_end(dev); /* release the lock */ mutex_unlock(&dev->gpio_i2c_lock); return 0; } |
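/*
 * Usage sketch (illustrative only, not part of the original driver):
 * how a caller might use the bit-banged GPIO I2C helpers above.  The
 * device address 0x50 and the register layout are hypothetical.
 */
static int cx231xx_gpio_i2c_example_read_id(struct cx231xx *dev)
{
	u8 reg = 0x00;		/* hypothetical register index */
	u8 id[2] = { 0, 0 };
	int ret;

	/* point the (hypothetical) device at register 0x00 ... */
	ret = cx231xx_gpio_i2c_write(dev, 0x50, &reg, 1);
	if (ret < 0)
		return ret;

	/* ... then read two bytes back over the same GPIO I2C bus */
	ret = cx231xx_gpio_i2c_read(dev, 0x50, id, 2);
	if (ret < 0)
		return ret;

	dev_dbg(dev->dev, "example id: 0x%02x%02x\n", id[0], id[1]);
	return 0;
}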
/* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright (c) 2009-2010, Code Aurora Forum. * Copyright 2016 Intel Corp. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #ifndef _DRM_DRV_H_ #define _DRM_DRV_H_ #include <linux/list.h> #include <linux/irqreturn.h> #include <video/nomodeset.h> #include <drm/drm_device.h> struct dmem_cgroup_region; struct drm_fb_helper; struct drm_fb_helper_surface_size; struct drm_file; struct drm_gem_object; struct drm_master; struct drm_minor; struct dma_buf; struct dma_buf_attachment; struct drm_display_mode; struct drm_mode_create_dumb; struct drm_printer; struct sg_table; /** * enum drm_driver_feature - feature flags * * See &drm_driver.driver_features, drm_device.driver_features and * drm_core_check_feature(). */ enum drm_driver_feature { /** * @DRIVER_GEM: * * Driver use the GEM memory manager. This should be set for all modern * drivers. */ DRIVER_GEM = BIT(0), /** * @DRIVER_MODESET: * * Driver supports mode setting interfaces (KMS). */ DRIVER_MODESET = BIT(1), /** * @DRIVER_RENDER: * * Driver supports dedicated render nodes. See also the :ref:`section on * render nodes <drm_render_node>` for details. */ DRIVER_RENDER = BIT(3), /** * @DRIVER_ATOMIC: * * Driver supports the full atomic modesetting userspace API. Drivers * which only use atomic internally, but do not support the full * userspace API (e.g. not all properties converted to atomic, or * multi-plane updates are not guaranteed to be tear-free) should not * set this flag. */ DRIVER_ATOMIC = BIT(4), /** * @DRIVER_SYNCOBJ: * * Driver supports &drm_syncobj for explicit synchronization of command * submission. */ DRIVER_SYNCOBJ = BIT(5), /** * @DRIVER_SYNCOBJ_TIMELINE: * * Driver supports the timeline flavor of &drm_syncobj for explicit * synchronization of command submission. */ DRIVER_SYNCOBJ_TIMELINE = BIT(6), /** * @DRIVER_COMPUTE_ACCEL: * * Driver supports compute acceleration devices. This flag is mutually exclusive with * @DRIVER_RENDER and @DRIVER_MODESET. Devices that support both graphics and compute * acceleration should be handled by two drivers that are connected using auxiliary bus. */ DRIVER_COMPUTE_ACCEL = BIT(7), /** * @DRIVER_GEM_GPUVA: * * Driver supports user defined GPU VA bindings for GEM objects. */ DRIVER_GEM_GPUVA = BIT(8), /** * @DRIVER_CURSOR_HOTSPOT: * * Driver supports and requires cursor hotspot information in the * cursor plane (e.g. cursor plane has to actually track the mouse * cursor and the clients are required to set hotspot in order for * the cursor planes to work correctly). */ DRIVER_CURSOR_HOTSPOT = BIT(9), /* IMPORTANT: Below are all the legacy flags, add new ones above. */ /** * @DRIVER_USE_AGP: * * Set up DRM AGP support, see drm_agp_init(), the DRM core will manage * AGP resources. New drivers don't need this. */ DRIVER_USE_AGP = BIT(25), /** * @DRIVER_LEGACY: * * Denote a legacy driver using shadow attach. Do not use. */ DRIVER_LEGACY = BIT(26), /** * @DRIVER_PCI_DMA: * * Driver is capable of PCI DMA, mapping of PCI DMA buffers to userspace * will be enabled. Only for legacy drivers. Do not use. */ DRIVER_PCI_DMA = BIT(27), /** * @DRIVER_SG: * * Driver can perform scatter/gather DMA, allocation and mapping of * scatter/gather buffers will be enabled. Only for legacy drivers. Do * not use. */ DRIVER_SG = BIT(28), /** * @DRIVER_HAVE_DMA: * * Driver supports DMA, the userspace DMA API will be supported. Only * for legacy drivers. Do not use. 
*/ DRIVER_HAVE_DMA = BIT(29), /** * @DRIVER_HAVE_IRQ: * * Legacy irq support. Only for legacy drivers. Do not use. */ DRIVER_HAVE_IRQ = BIT(30), }; /** * struct drm_driver - DRM driver structure * * This structure represent the common code for a family of cards. There will be * one &struct drm_device for each card present in this family. It contains lots * of vfunc entries, and a pile of those probably should be moved to more * appropriate places like &drm_mode_config_funcs or into a new operations * structure for GEM drivers. */ struct drm_driver { /** * @load: * * Backward-compatible driver callback to complete initialization steps * after the driver is registered. For this reason, may suffer from * race conditions and its use is deprecated for new drivers. It is * therefore only supported for existing drivers not yet converted to * the new scheme. See devm_drm_dev_alloc() and drm_dev_register() for * proper and race-free way to set up a &struct drm_device. * * This is deprecated, do not use! * * Returns: * * Zero on success, non-zero value on failure. */ int (*load) (struct drm_device *, unsigned long flags); /** * @open: * * Driver callback when a new &struct drm_file is opened. Useful for * setting up driver-private data structures like buffer allocators, * execution contexts or similar things. Such driver-private resources * must be released again in @postclose. * * Since the display/modeset side of DRM can only be owned by exactly * one &struct drm_file (see &drm_file.is_master and &drm_device.master) * there should never be a need to set up any modeset related resources * in this callback. Doing so would be a driver design bug. * * Returns: * * 0 on success, a negative error code on failure, which will be * promoted to userspace as the result of the open() system call. */ int (*open) (struct drm_device *, struct drm_file *); /** * @postclose: * * One of the driver callbacks when a new &struct drm_file is closed. * Useful for tearing down driver-private data structures allocated in * @open like buffer allocators, execution contexts or similar things. * * Since the display/modeset side of DRM can only be owned by exactly * one &struct drm_file (see &drm_file.is_master and &drm_device.master) * there should never be a need to tear down any modeset related * resources in this callback. Doing so would be a driver design bug. */ void (*postclose) (struct drm_device *, struct drm_file *); /** * @unload: * * Reverse the effects of the driver load callback. Ideally, * the clean up performed by the driver should happen in the * reverse order of the initialization. Similarly to the load * hook, this handler is deprecated and its usage should be * dropped in favor of an open-coded teardown function at the * driver layer. See drm_dev_unregister() and drm_dev_put() * for the proper way to remove a &struct drm_device. * * The unload() hook is called right after unregistering * the device. * */ void (*unload) (struct drm_device *); /** * @release: * * Optional callback for destroying device data after the final * reference is released, i.e. the device is being destroyed. * * This is deprecated, clean up all memory allocations associated with a * &drm_device using drmm_add_action(), drmm_kmalloc() and related * managed resources functions. */ void (*release) (struct drm_device *); /** * @master_set: * * Called whenever the minor master is set. Only used by vmwgfx. 
*/ void (*master_set)(struct drm_device *dev, struct drm_file *file_priv, bool from_open); /** * @master_drop: * * Called whenever the minor master is dropped. Only used by vmwgfx. */ void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv); /** * @debugfs_init: * * Allows drivers to create driver-specific debugfs files. */ void (*debugfs_init)(struct drm_minor *minor); /** * @gem_create_object: constructor for gem objects * * Hook for allocating the GEM object struct, for use by the CMA * and SHMEM GEM helpers. Returns a GEM object on success, or an * ERR_PTR()-encoded error code otherwise. */ struct drm_gem_object *(*gem_create_object)(struct drm_device *dev, size_t size); /** * @prime_handle_to_fd: * * PRIME export function. Only used by vmwgfx. */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /** * @prime_fd_to_handle: * * PRIME import function. Only used by vmwgfx. */ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); /** * @gem_prime_import: * * Import hook for GEM drivers. * * This defaults to drm_gem_prime_import() if not set. */ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, struct dma_buf *dma_buf); /** * @gem_prime_import_sg_table: * * Optional hook used by the PRIME helper functions * drm_gem_prime_import() respectively drm_gem_prime_import_dev(). */ struct drm_gem_object *(*gem_prime_import_sg_table)( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); /** * @dumb_create: * * This creates a new dumb buffer in the driver's backing storage manager (GEM, * TTM or something else entirely) and returns the resulting buffer handle. This * handle can then be wrapped up into a framebuffer modeset object. * * Note that userspace is not allowed to use such objects for render * acceleration - drivers must create their own private ioctls for such a use * case. * * Width, height and depth are specified in the &drm_mode_create_dumb * argument. The callback needs to fill the handle, pitch and size for * the created buffer. * * Called by the user via ioctl. * * Returns: * * Zero on success, negative errno on failure. */ int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); /** * @dumb_map_offset: * * Allocate an offset in the drm device node's address space to be able to * memory map a dumb buffer. * * The default implementation is drm_gem_create_mmap_offset(). GEM based * drivers must not overwrite this. * * Called by the user via ioctl. * * Returns: * * Zero on success, negative errno on failure. */ int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); /** * @fbdev_probe: * * Allocates and initialize the fb_info structure for fbdev emulation. * Furthermore it also needs to allocate the DRM framebuffer used to * back the fbdev. * * This callback is mandatory for fbdev support. * * Returns: * * 0 on success ot a negative error code otherwise. */ int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper, struct drm_fb_helper_surface_size *sizes); /** * @show_fdinfo: * * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst. 
*/ void (*show_fdinfo)(struct drm_printer *p, struct drm_file *f); /** @major: driver major number */ int major; /** @minor: driver minor number */ int minor; /** @patchlevel: driver patch level */ int patchlevel; /** @name: driver name */ char *name; /** @desc: driver description */ char *desc; /** * @driver_features: * Driver features, see &enum drm_driver_feature. Drivers can disable * some features on a per-instance basis using * &drm_device.driver_features. */ u32 driver_features; /** * @ioctls: * * Array of driver-private IOCTL description entries. See the chapter on * :ref:`IOCTL support in the userland interfaces * chapter<drm_driver_ioctl>` for the full details. */ const struct drm_ioctl_desc *ioctls; /** @num_ioctls: Number of entries in @ioctls. */ int num_ioctls; /** * @fops: * * File operations for the DRM device node. See the discussion in * :ref:`file operations<drm_driver_fops>` for in-depth coverage and * some examples. */ const struct file_operations *fops; }; void *__devm_drm_dev_alloc(struct device *parent, const struct drm_driver *driver, size_t size, size_t offset); struct dmem_cgroup_region * drmm_cgroup_register_region(struct drm_device *dev, const char *region_name, u64 size); /** * devm_drm_dev_alloc - Resource managed allocation of a &drm_device instance * @parent: Parent device object * @driver: DRM driver * @type: the type of the struct which contains struct &drm_device * @member: the name of the &drm_device within @type. * * This allocates and initialize a new DRM device. No device registration is done. * Call drm_dev_register() to advertice the device to user space and register it * with other core subsystems. This should be done last in the device * initialization sequence to make sure userspace can't access an inconsistent * state. * * The initial ref-count of the object is 1. Use drm_dev_get() and * drm_dev_put() to take and drop further ref-counts. * * It is recommended that drivers embed &struct drm_device into their own device * structure. * * Note that this manages the lifetime of the resulting &drm_device * automatically using devres. The DRM device initialized with this function is * automatically put on driver detach using drm_dev_put(). * * RETURNS: * Pointer to new DRM device, or ERR_PTR on failure. */ #define devm_drm_dev_alloc(parent, driver, type, member) \ ((type *) __devm_drm_dev_alloc(parent, driver, sizeof(type), \ offsetof(type, member))) struct drm_device *drm_dev_alloc(const struct drm_driver *driver, struct device *parent); void *__drm_dev_alloc(struct device *parent, const struct drm_driver *driver, size_t size, size_t offset); int drm_dev_register(struct drm_device *dev, unsigned long flags); void drm_dev_unregister(struct drm_device *dev); void drm_dev_get(struct drm_device *dev); void drm_dev_put(struct drm_device *dev); void drm_put_dev(struct drm_device *dev); bool drm_dev_enter(struct drm_device *dev, int *idx); void drm_dev_exit(int idx); void drm_dev_unplug(struct drm_device *dev); int drm_dev_wedged_event(struct drm_device *dev, unsigned long method, struct drm_wedge_task_info *info); /** * drm_dev_is_unplugged - is a DRM device unplugged * @dev: DRM device * * This function can be called to check whether a hotpluggable is unplugged. * Unplugging itself is singalled through drm_dev_unplug(). If a device is * unplugged, these two functions guarantee that any store before calling * drm_dev_unplug() is visible to callers of this function after it completes * * WARNING: This function fundamentally races against drm_dev_unplug(). 
It is * recommended that drivers instead use the underlying drm_dev_enter() and * drm_dev_exit() function pairs. */ static inline bool drm_dev_is_unplugged(struct drm_device *dev) { int idx; if (drm_dev_enter(dev, &idx)) { drm_dev_exit(idx); return false; } return true; } /** * drm_core_check_all_features - check driver feature flags mask * @dev: DRM device to check * @features: feature flag(s) mask * * This checks @dev for driver features, see &drm_driver.driver_features, * &drm_device.driver_features, and the various &enum drm_driver_feature flags. * * Returns true if all features in the @features mask are supported, false * otherwise. */ static inline bool drm_core_check_all_features(const struct drm_device *dev, u32 features) { u32 supported = dev->driver->driver_features & dev->driver_features; return features && (supported & features) == features; } /** * drm_core_check_feature - check driver feature flags * @dev: DRM device to check * @feature: feature flag * * This checks @dev for driver features, see &drm_driver.driver_features, * &drm_device.driver_features, and the various &enum drm_driver_feature flags. * * Returns true if the @feature is supported, false otherwise. */ static inline bool drm_core_check_feature(const struct drm_device *dev, enum drm_driver_feature feature) { return drm_core_check_all_features(dev, feature); } /** * drm_drv_uses_atomic_modeset - check if the driver implements * atomic_commit() * @dev: DRM device * * This check is useful if drivers do not have DRIVER_ATOMIC set but * have atomic modesetting internally implemented. */ static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_ATOMIC) || (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL); } /* TODO: Inline drm_firmware_drivers_only() in all its callers. */ static inline bool drm_firmware_drivers_only(void) { return video_firmware_drivers_only(); } #if defined(CONFIG_DEBUG_FS) void drm_debugfs_dev_init(struct drm_device *dev); void drm_debugfs_init_root(void); void drm_debugfs_remove_root(void); void drm_debugfs_bridge_params(void); #else static inline void drm_debugfs_dev_init(struct drm_device *dev) { } static inline void drm_debugfs_init_root(void) { } static inline void drm_debugfs_remove_root(void) { } static inline void drm_debugfs_bridge_params(void) { } #endif #endif |
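The kernel-doc in this header already spells out the recommended lifecycle: embed &struct drm_device in a driver-private structure, allocate it with devm_drm_dev_alloc(), and call drm_dev_register() only after initialization is complete. As a hedged illustration of that pattern, a minimal skeleton might look like the sketch below; every example_* identifier is hypothetical, and the bus probe wiring, mode-setting setup and &drm_driver.fops are deliberately left out.

/*
 * Minimal sketch of a driver built on the interfaces declared above.
 * All example_* names are hypothetical; a real driver also fills in
 * .fops and hooks example_probe() up to its bus (PCI, platform, ...).
 */
#include <linux/err.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>

struct example_device {
	struct drm_device drm;		/* embedded, as recommended above */
	/* driver-private state would follow here */
};

static const struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	.name	= "example",
	.desc	= "Illustrative DRM driver skeleton",
	.major	= 1,
	.minor	= 0,
};

static int example_probe(struct device *parent)
{
	struct example_device *edev;
	int ret;

	/* devres-managed; the final drm_dev_put() happens on driver detach */
	edev = devm_drm_dev_alloc(parent, &example_driver,
				  struct example_device, drm);
	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* ... set up mode config, planes, CRTCs, connectors, GEM here ... */

	/* advertise the device to userspace only once it is consistent */
	ret = drm_dev_register(&edev->drm, 0);
	if (ret)
		return ret;

	return 0;
}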
// SPDX-License-Identifier: GPL-2.0-or-later /* * HackRF driver * * Copyright (C) 2014 Antti Palosaari <crope@iki.fi> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> #include <media/videobuf2-v4l2.h> #include <media/videobuf2-vmalloc.h> /* * Used Avago MGA-81563 RF amplifier could be destroyed pretty
easily with too * strong signal or transmitting to bad antenna. * Set RF gain control to 'grabbed' state by default for sure. */ static bool hackrf_enable_rf_gain_ctrl; module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644); MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)"); /* HackRF USB API commands (from HackRF Library) */ enum { CMD_SET_TRANSCEIVER_MODE = 0x01, CMD_SAMPLE_RATE_SET = 0x06, CMD_BASEBAND_FILTER_BANDWIDTH_SET = 0x07, CMD_BOARD_ID_READ = 0x0e, CMD_VERSION_STRING_READ = 0x0f, CMD_SET_FREQ = 0x10, CMD_AMP_ENABLE = 0x11, CMD_SET_LNA_GAIN = 0x13, CMD_SET_VGA_GAIN = 0x14, CMD_SET_TXVGA_GAIN = 0x15, }; /* * bEndpointAddress 0x81 EP 1 IN * Transfer Type Bulk * wMaxPacketSize 0x0200 1x 512 bytes */ #define MAX_BULK_BUFS (6) #define BULK_BUFFER_SIZE (128 * 512) static const struct v4l2_frequency_band bands_adc_dac[] = { { .tuner = 0, .type = V4L2_TUNER_SDR, .index = 0, .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = 200000, .rangehigh = 24000000, }, }; static const struct v4l2_frequency_band bands_rx_tx[] = { { .tuner = 1, .type = V4L2_TUNER_RF, .index = 0, .capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS, .rangelow = 1, .rangehigh = 4294967294LL, /* max u32, hw goes over 7GHz */ }, }; /* stream formats */ struct hackrf_format { u32 pixelformat; u32 buffersize; }; /* format descriptions for capture and preview */ static struct hackrf_format formats[] = { { .pixelformat = V4L2_SDR_FMT_CS8, .buffersize = BULK_BUFFER_SIZE, }, }; static const unsigned int NUM_FORMATS = ARRAY_SIZE(formats); /* intermediate buffers with raw data from the USB device */ struct hackrf_buffer { struct vb2_v4l2_buffer vb; struct list_head list; }; struct hackrf_dev { #define USB_STATE_URB_BUF 1 /* XXX: set manually */ #define RX_ON 4 #define TX_ON 5 #define RX_ADC_FREQUENCY 11 #define TX_DAC_FREQUENCY 12 #define RX_BANDWIDTH 13 #define TX_BANDWIDTH 14 #define RX_RF_FREQUENCY 15 #define TX_RF_FREQUENCY 16 #define RX_RF_GAIN 17 #define TX_RF_GAIN 18 #define RX_IF_GAIN 19 #define RX_LNA_GAIN 20 #define TX_LNA_GAIN 21 unsigned long flags; struct usb_interface *intf; struct device *dev; struct usb_device *udev; struct video_device rx_vdev; struct video_device tx_vdev; struct v4l2_device v4l2_dev; /* videobuf2 queue and queued buffers list */ struct vb2_queue rx_vb2_queue; struct vb2_queue tx_vb2_queue; struct list_head rx_buffer_list; struct list_head tx_buffer_list; spinlock_t buffer_list_lock; /* Protects buffer_list */ unsigned int sequence; /* Buffer sequence counter */ unsigned int vb_full; /* vb is full and packets dropped */ unsigned int vb_empty; /* vb is empty and packets dropped */ /* Note if taking both locks v4l2_lock must always be locked first! 
*/ struct mutex v4l2_lock; /* Protects everything else */ struct mutex vb_queue_lock; /* Protects vb_queue */ struct urb *urb_list[MAX_BULK_BUFS]; int buf_num; unsigned long buf_size; u8 *buf_list[MAX_BULK_BUFS]; dma_addr_t dma_addr[MAX_BULK_BUFS]; int urbs_initialized; int urbs_submitted; /* USB control message buffer */ #define BUF_SIZE 24 u8 buf[BUF_SIZE]; /* Current configuration */ unsigned int f_adc; unsigned int f_dac; unsigned int f_rx; unsigned int f_tx; u32 pixelformat; u32 buffersize; /* Controls */ struct v4l2_ctrl_handler rx_ctrl_handler; struct v4l2_ctrl *rx_bandwidth_auto; struct v4l2_ctrl *rx_bandwidth; struct v4l2_ctrl *rx_rf_gain; struct v4l2_ctrl *rx_lna_gain; struct v4l2_ctrl *rx_if_gain; struct v4l2_ctrl_handler tx_ctrl_handler; struct v4l2_ctrl *tx_bandwidth_auto; struct v4l2_ctrl *tx_bandwidth; struct v4l2_ctrl *tx_rf_gain; struct v4l2_ctrl *tx_lna_gain; /* Sample rate calc */ unsigned long jiffies_next; unsigned int sample; unsigned int sample_measured; }; #define hackrf_dbg_usb_control_msg(_dev, _r, _t, _v, _i, _b, _l) { \ char *_direction; \ if (_t & USB_DIR_IN) \ _direction = "<<<"; \ else \ _direction = ">>>"; \ dev_dbg(_dev, "%02x %02x %02x %02x %02x %02x %02x %02x %s %*ph\n", \ _t, _r, _v & 0xff, _v >> 8, _i & 0xff, \ _i >> 8, _l & 0xff, _l >> 8, _direction, _l, _b); \ } /* execute firmware command */ static int hackrf_ctrl_msg(struct hackrf_dev *dev, u8 request, u16 value, u16 index, u8 *data, u16 size) { int ret; unsigned int pipe; u8 requesttype; switch (request) { case CMD_SET_TRANSCEIVER_MODE: case CMD_SET_FREQ: case CMD_AMP_ENABLE: case CMD_SAMPLE_RATE_SET: case CMD_BASEBAND_FILTER_BANDWIDTH_SET: pipe = usb_sndctrlpipe(dev->udev, 0); requesttype = (USB_TYPE_VENDOR | USB_DIR_OUT); break; case CMD_BOARD_ID_READ: case CMD_VERSION_STRING_READ: case CMD_SET_LNA_GAIN: case CMD_SET_VGA_GAIN: case CMD_SET_TXVGA_GAIN: pipe = usb_rcvctrlpipe(dev->udev, 0); requesttype = (USB_TYPE_VENDOR | USB_DIR_IN); break; default: dev_err(dev->dev, "Unknown command %02x\n", request); ret = -EINVAL; goto err; } /* write request */ if (!(requesttype & USB_DIR_IN)) memcpy(dev->buf, data, size); ret = usb_control_msg(dev->udev, pipe, request, requesttype, value, index, dev->buf, size, 1000); hackrf_dbg_usb_control_msg(dev->dev, request, requesttype, value, index, dev->buf, size); if (ret < 0) { dev_err(dev->dev, "usb_control_msg() failed %d request %02x\n", ret, request); goto err; } /* read request */ if (requesttype & USB_DIR_IN) memcpy(data, dev->buf, size); return 0; err: return ret; } static int hackrf_set_params(struct hackrf_dev *dev) { struct usb_interface *intf = dev->intf; int ret, i; u8 buf[8], u8tmp; unsigned int uitmp, uitmp1, uitmp2; const bool rx = test_bit(RX_ON, &dev->flags); const bool tx = test_bit(TX_ON, &dev->flags); static const struct { u32 freq; } bandwidth_lut[] = { { 1750000}, /* 1.75 MHz */ { 2500000}, /* 2.5 MHz */ { 3500000}, /* 3.5 MHz */ { 5000000}, /* 5 MHz */ { 5500000}, /* 5.5 MHz */ { 6000000}, /* 6 MHz */ { 7000000}, /* 7 MHz */ { 8000000}, /* 8 MHz */ { 9000000}, /* 9 MHz */ {10000000}, /* 10 MHz */ {12000000}, /* 12 MHz */ {14000000}, /* 14 MHz */ {15000000}, /* 15 MHz */ {20000000}, /* 20 MHz */ {24000000}, /* 24 MHz */ {28000000}, /* 28 MHz */ }; if (!rx && !tx) { dev_dbg(&intf->dev, "device is sleeping\n"); return 0; } /* ADC / DAC frequency */ if (rx && test_and_clear_bit(RX_ADC_FREQUENCY, &dev->flags)) { dev_dbg(&intf->dev, "RX ADC frequency=%u Hz\n", dev->f_adc); uitmp1 = dev->f_adc; uitmp2 = 1; set_bit(TX_DAC_FREQUENCY, &dev->flags); } 
else if (tx && test_and_clear_bit(TX_DAC_FREQUENCY, &dev->flags)) { dev_dbg(&intf->dev, "TX DAC frequency=%u Hz\n", dev->f_dac); uitmp1 = dev->f_dac; uitmp2 = 1; set_bit(RX_ADC_FREQUENCY, &dev->flags); } else { uitmp1 = uitmp2 = 0; } if (uitmp1 || uitmp2) { buf[0] = (uitmp1 >> 0) & 0xff; buf[1] = (uitmp1 >> 8) & 0xff; buf[2] = (uitmp1 >> 16) & 0xff; buf[3] = (uitmp1 >> 24) & 0xff; buf[4] = (uitmp2 >> 0) & 0xff; buf[5] = (uitmp2 >> 8) & 0xff; buf[6] = (uitmp2 >> 16) & 0xff; buf[7] = (uitmp2 >> 24) & 0xff; ret = hackrf_ctrl_msg(dev, CMD_SAMPLE_RATE_SET, 0, 0, buf, 8); if (ret) goto err; } /* bandwidth */ if (rx && test_and_clear_bit(RX_BANDWIDTH, &dev->flags)) { if (dev->rx_bandwidth_auto->val == true) uitmp = dev->f_adc; else uitmp = dev->rx_bandwidth->val; for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) { if (uitmp <= bandwidth_lut[i].freq) { uitmp = bandwidth_lut[i].freq; break; } } dev->rx_bandwidth->val = uitmp; dev->rx_bandwidth->cur.val = uitmp; dev_dbg(&intf->dev, "RX bandwidth selected=%u\n", uitmp); set_bit(TX_BANDWIDTH, &dev->flags); } else if (tx && test_and_clear_bit(TX_BANDWIDTH, &dev->flags)) { if (dev->tx_bandwidth_auto->val == true) uitmp = dev->f_dac; else uitmp = dev->tx_bandwidth->val; for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) { if (uitmp <= bandwidth_lut[i].freq) { uitmp = bandwidth_lut[i].freq; break; } } dev->tx_bandwidth->val = uitmp; dev->tx_bandwidth->cur.val = uitmp; dev_dbg(&intf->dev, "TX bandwidth selected=%u\n", uitmp); set_bit(RX_BANDWIDTH, &dev->flags); } else { uitmp = 0; } if (uitmp) { uitmp1 = uitmp2 = 0; uitmp1 |= ((uitmp >> 0) & 0xff) << 0; uitmp1 |= ((uitmp >> 8) & 0xff) << 8; uitmp2 |= ((uitmp >> 16) & 0xff) << 0; uitmp2 |= ((uitmp >> 24) & 0xff) << 8; ret = hackrf_ctrl_msg(dev, CMD_BASEBAND_FILTER_BANDWIDTH_SET, uitmp1, uitmp2, NULL, 0); if (ret) goto err; } /* RX / TX RF frequency */ if (rx && test_and_clear_bit(RX_RF_FREQUENCY, &dev->flags)) { dev_dbg(&intf->dev, "RX RF frequency=%u Hz\n", dev->f_rx); uitmp1 = dev->f_rx / 1000000; uitmp2 = dev->f_rx % 1000000; set_bit(TX_RF_FREQUENCY, &dev->flags); } else if (tx && test_and_clear_bit(TX_RF_FREQUENCY, &dev->flags)) { dev_dbg(&intf->dev, "TX RF frequency=%u Hz\n", dev->f_tx); uitmp1 = dev->f_tx / 1000000; uitmp2 = dev->f_tx % 1000000; set_bit(RX_RF_FREQUENCY, &dev->flags); } else { uitmp1 = uitmp2 = 0; } if (uitmp1 || uitmp2) { buf[0] = (uitmp1 >> 0) & 0xff; buf[1] = (uitmp1 >> 8) & 0xff; buf[2] = (uitmp1 >> 16) & 0xff; buf[3] = (uitmp1 >> 24) & 0xff; buf[4] = (uitmp2 >> 0) & 0xff; buf[5] = (uitmp2 >> 8) & 0xff; buf[6] = (uitmp2 >> 16) & 0xff; buf[7] = (uitmp2 >> 24) & 0xff; ret = hackrf_ctrl_msg(dev, CMD_SET_FREQ, 0, 0, buf, 8); if (ret) goto err; } /* RX RF gain */ if (rx && test_and_clear_bit(RX_RF_GAIN, &dev->flags)) { dev_dbg(&intf->dev, "RX RF gain val=%d->%d\n", dev->rx_rf_gain->cur.val, dev->rx_rf_gain->val); u8tmp = (dev->rx_rf_gain->val) ? 1 : 0; ret = hackrf_ctrl_msg(dev, CMD_AMP_ENABLE, u8tmp, 0, NULL, 0); if (ret) goto err; set_bit(TX_RF_GAIN, &dev->flags); } /* TX RF gain */ if (tx && test_and_clear_bit(TX_RF_GAIN, &dev->flags)) { dev_dbg(&intf->dev, "TX RF gain val=%d->%d\n", dev->tx_rf_gain->cur.val, dev->tx_rf_gain->val); u8tmp = (dev->tx_rf_gain->val) ? 
1 : 0; ret = hackrf_ctrl_msg(dev, CMD_AMP_ENABLE, u8tmp, 0, NULL, 0); if (ret) goto err; set_bit(RX_RF_GAIN, &dev->flags); } /* RX LNA gain */ if (rx && test_and_clear_bit(RX_LNA_GAIN, &dev->flags)) { dev_dbg(dev->dev, "RX LNA gain val=%d->%d\n", dev->rx_lna_gain->cur.val, dev->rx_lna_gain->val); ret = hackrf_ctrl_msg(dev, CMD_SET_LNA_GAIN, 0, dev->rx_lna_gain->val, &u8tmp, 1); if (ret) goto err; } /* RX IF gain */ if (rx && test_and_clear_bit(RX_IF_GAIN, &dev->flags)) { dev_dbg(&intf->dev, "IF gain val=%d->%d\n", dev->rx_if_gain->cur.val, dev->rx_if_gain->val); ret = hackrf_ctrl_msg(dev, CMD_SET_VGA_GAIN, 0, dev->rx_if_gain->val, &u8tmp, 1); if (ret) goto err; } /* TX LNA gain */ if (tx && test_and_clear_bit(TX_LNA_GAIN, &dev->flags)) { dev_dbg(&intf->dev, "TX LNA gain val=%d->%d\n", dev->tx_lna_gain->cur.val, dev->tx_lna_gain->val); ret = hackrf_ctrl_msg(dev, CMD_SET_TXVGA_GAIN, 0, dev->tx_lna_gain->val, &u8tmp, 1); if (ret) goto err; } return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } /* Private functions */ static struct hackrf_buffer *hackrf_get_next_buffer(struct hackrf_dev *dev, struct list_head *buffer_list) { unsigned long flags; struct hackrf_buffer *buffer = NULL; spin_lock_irqsave(&dev->buffer_list_lock, flags); if (list_empty(buffer_list)) goto leave; buffer = list_entry(buffer_list->next, struct hackrf_buffer, list); list_del(&buffer->list); leave: spin_unlock_irqrestore(&dev->buffer_list_lock, flags); return buffer; } static void hackrf_copy_stream(struct hackrf_dev *dev, void *dst, void *src, unsigned int src_len) { memcpy(dst, src, src_len); /* calculate sample rate and output it in 10 seconds intervals */ if (unlikely(time_is_before_jiffies(dev->jiffies_next))) { #define MSECS 10000UL unsigned int msecs = jiffies_to_msecs(jiffies - dev->jiffies_next + msecs_to_jiffies(MSECS)); unsigned int samples = dev->sample - dev->sample_measured; dev->jiffies_next = jiffies + msecs_to_jiffies(MSECS); dev->sample_measured = dev->sample; dev_dbg(dev->dev, "slen=%u samples=%u msecs=%u sample rate=%lu\n", src_len, samples, msecs, samples * 1000UL / msecs); } /* total number of samples */ dev->sample += src_len / 2; } /* * This gets called for the bulk stream pipe. This is done in interrupt * time, so it has to be fast, not crash, and not stall. Neat. 
*/ static void hackrf_urb_complete_in(struct urb *urb) { struct hackrf_dev *dev = urb->context; struct usb_interface *intf = dev->intf; struct hackrf_buffer *buffer; unsigned int len; dev_dbg_ratelimited(&intf->dev, "status=%d length=%u/%u\n", urb->status, urb->actual_length, urb->transfer_buffer_length); switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dev_err_ratelimited(&intf->dev, "URB failed %d\n", urb->status); goto exit_usb_submit_urb; } /* get buffer to write */ buffer = hackrf_get_next_buffer(dev, &dev->rx_buffer_list); if (unlikely(buffer == NULL)) { dev->vb_full++; dev_notice_ratelimited(&intf->dev, "buffer is full - %u packets dropped\n", dev->vb_full); goto exit_usb_submit_urb; } len = min_t(unsigned long, vb2_plane_size(&buffer->vb.vb2_buf, 0), urb->actual_length); hackrf_copy_stream(dev, vb2_plane_vaddr(&buffer->vb.vb2_buf, 0), urb->transfer_buffer, len); vb2_set_plane_payload(&buffer->vb.vb2_buf, 0, len); buffer->vb.sequence = dev->sequence++; buffer->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_DONE); exit_usb_submit_urb: usb_submit_urb(urb, GFP_ATOMIC); } static void hackrf_urb_complete_out(struct urb *urb) { struct hackrf_dev *dev = urb->context; struct usb_interface *intf = dev->intf; struct hackrf_buffer *buffer; unsigned int len; dev_dbg_ratelimited(&intf->dev, "status=%d length=%u/%u\n", urb->status, urb->actual_length, urb->transfer_buffer_length); switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dev_err_ratelimited(&intf->dev, "URB failed %d\n", urb->status); } /* get buffer to read */ buffer = hackrf_get_next_buffer(dev, &dev->tx_buffer_list); if (unlikely(buffer == NULL)) { dev->vb_empty++; dev_notice_ratelimited(&intf->dev, "buffer is empty - %u packets dropped\n", dev->vb_empty); urb->actual_length = 0; goto exit_usb_submit_urb; } len = min_t(unsigned long, urb->transfer_buffer_length, vb2_get_plane_payload(&buffer->vb.vb2_buf, 0)); hackrf_copy_stream(dev, urb->transfer_buffer, vb2_plane_vaddr(&buffer->vb.vb2_buf, 0), len); urb->actual_length = len; buffer->vb.sequence = dev->sequence++; buffer->vb.vb2_buf.timestamp = ktime_get_ns(); vb2_buffer_done(&buffer->vb.vb2_buf, VB2_BUF_STATE_DONE); exit_usb_submit_urb: usb_submit_urb(urb, GFP_ATOMIC); } static int hackrf_kill_urbs(struct hackrf_dev *dev) { int i; for (i = dev->urbs_submitted - 1; i >= 0; i--) { dev_dbg(dev->dev, "kill urb=%d\n", i); /* stop the URB */ usb_kill_urb(dev->urb_list[i]); } dev->urbs_submitted = 0; return 0; } static int hackrf_submit_urbs(struct hackrf_dev *dev) { int i, ret; for (i = 0; i < dev->urbs_initialized; i++) { dev_dbg(dev->dev, "submit urb=%d\n", i); ret = usb_submit_urb(dev->urb_list[i], GFP_KERNEL); if (ret) { dev_err(dev->dev, "Could not submit URB no. 
%d - get them all back\n", i); hackrf_kill_urbs(dev); return ret; } dev->urbs_submitted++; } return 0; } static int hackrf_free_stream_bufs(struct hackrf_dev *dev) { if (dev->flags & USB_STATE_URB_BUF) { while (dev->buf_num) { dev->buf_num--; dev_dbg(dev->dev, "free buf=%d\n", dev->buf_num); usb_free_coherent(dev->udev, dev->buf_size, dev->buf_list[dev->buf_num], dev->dma_addr[dev->buf_num]); } } dev->flags &= ~USB_STATE_URB_BUF; return 0; } static int hackrf_alloc_stream_bufs(struct hackrf_dev *dev) { dev->buf_num = 0; dev->buf_size = BULK_BUFFER_SIZE; dev_dbg(dev->dev, "all in all I will use %u bytes for streaming\n", MAX_BULK_BUFS * BULK_BUFFER_SIZE); for (dev->buf_num = 0; dev->buf_num < MAX_BULK_BUFS; dev->buf_num++) { dev->buf_list[dev->buf_num] = usb_alloc_coherent(dev->udev, BULK_BUFFER_SIZE, GFP_KERNEL, &dev->dma_addr[dev->buf_num]); if (!dev->buf_list[dev->buf_num]) { dev_dbg(dev->dev, "alloc buf=%d failed\n", dev->buf_num); hackrf_free_stream_bufs(dev); return -ENOMEM; } dev_dbg(dev->dev, "alloc buf=%d %p (dma %llu)\n", dev->buf_num, dev->buf_list[dev->buf_num], (long long)dev->dma_addr[dev->buf_num]); dev->flags |= USB_STATE_URB_BUF; } return 0; } static int hackrf_free_urbs(struct hackrf_dev *dev) { int i; hackrf_kill_urbs(dev); for (i = dev->urbs_initialized - 1; i >= 0; i--) { if (dev->urb_list[i]) { dev_dbg(dev->dev, "free urb=%d\n", i); /* free the URBs */ usb_free_urb(dev->urb_list[i]); } } dev->urbs_initialized = 0; return 0; } static int hackrf_alloc_urbs(struct hackrf_dev *dev, bool rcv) { int i, j; unsigned int pipe; usb_complete_t complete; if (rcv) { pipe = usb_rcvbulkpipe(dev->udev, 0x81); complete = &hackrf_urb_complete_in; } else { pipe = usb_sndbulkpipe(dev->udev, 0x02); complete = &hackrf_urb_complete_out; } /* allocate the URBs */ for (i = 0; i < MAX_BULK_BUFS; i++) { dev_dbg(dev->dev, "alloc urb=%d\n", i); dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_list[i]) { for (j = 0; j < i; j++) usb_free_urb(dev->urb_list[j]); return -ENOMEM; } usb_fill_bulk_urb(dev->urb_list[i], dev->udev, pipe, dev->buf_list[i], BULK_BUFFER_SIZE, complete, dev); dev->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; dev->urb_list[i]->transfer_dma = dev->dma_addr[i]; dev->urbs_initialized++; } return 0; } /* The user yanked out the cable... 
*/ static void hackrf_disconnect(struct usb_interface *intf) { struct v4l2_device *v = usb_get_intfdata(intf); struct hackrf_dev *dev = container_of(v, struct hackrf_dev, v4l2_dev); dev_dbg(dev->dev, "\n"); mutex_lock(&dev->vb_queue_lock); mutex_lock(&dev->v4l2_lock); /* No need to keep the urbs around after disconnection */ dev->udev = NULL; v4l2_device_disconnect(&dev->v4l2_dev); video_unregister_device(&dev->tx_vdev); video_unregister_device(&dev->rx_vdev); mutex_unlock(&dev->v4l2_lock); mutex_unlock(&dev->vb_queue_lock); v4l2_device_put(&dev->v4l2_dev); } /* Videobuf2 operations */ static void hackrf_return_all_buffers(struct vb2_queue *vq, enum vb2_buffer_state state) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); struct usb_interface *intf = dev->intf; struct hackrf_buffer *buffer, *node; struct list_head *buffer_list; unsigned long flags; dev_dbg(&intf->dev, "\n"); if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) buffer_list = &dev->rx_buffer_list; else buffer_list = &dev->tx_buffer_list; spin_lock_irqsave(&dev->buffer_list_lock, flags); list_for_each_entry_safe(buffer, node, buffer_list, list) { dev_dbg(&intf->dev, "list_for_each_entry_safe\n"); vb2_buffer_done(&buffer->vb.vb2_buf, state); list_del(&buffer->list); } spin_unlock_irqrestore(&dev->buffer_list_lock, flags); } static int hackrf_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); unsigned int q_num_bufs = vb2_get_num_buffers(vq); dev_dbg(dev->dev, "nbuffers=%d\n", *nbuffers); /* Need at least 8 buffers */ if (q_num_bufs + *nbuffers < 8) *nbuffers = 8 - q_num_bufs; *nplanes = 1; sizes[0] = PAGE_ALIGN(dev->buffersize); dev_dbg(dev->dev, "nbuffers=%d sizes[0]=%d\n", *nbuffers, sizes[0]); return 0; } static void hackrf_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct vb2_queue *vq = vb->vb2_queue; struct hackrf_dev *dev = vb2_get_drv_priv(vq); struct hackrf_buffer *buffer = container_of(vbuf, struct hackrf_buffer, vb); struct list_head *buffer_list; unsigned long flags; dev_dbg_ratelimited(&dev->intf->dev, "\n"); if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) buffer_list = &dev->rx_buffer_list; else buffer_list = &dev->tx_buffer_list; spin_lock_irqsave(&dev->buffer_list_lock, flags); list_add_tail(&buffer->list, buffer_list); spin_unlock_irqrestore(&dev->buffer_list_lock, flags); } static int hackrf_start_streaming(struct vb2_queue *vq, unsigned int count) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); struct usb_interface *intf = dev->intf; int ret; unsigned int mode; dev_dbg(&intf->dev, "count=%i\n", count); mutex_lock(&dev->v4l2_lock); /* Allow only RX or TX, not both same time */ if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) { if (test_bit(TX_ON, &dev->flags)) { ret = -EBUSY; goto err_hackrf_return_all_buffers; } mode = 1; set_bit(RX_ON, &dev->flags); } else { if (test_bit(RX_ON, &dev->flags)) { ret = -EBUSY; goto err_hackrf_return_all_buffers; } mode = 2; set_bit(TX_ON, &dev->flags); } dev->sequence = 0; ret = hackrf_alloc_stream_bufs(dev); if (ret) goto err; ret = hackrf_alloc_urbs(dev, (mode == 1)); if (ret) goto err; ret = hackrf_submit_urbs(dev); if (ret) goto err; ret = hackrf_set_params(dev); if (ret) goto err; /* start hardware streaming */ ret = hackrf_ctrl_msg(dev, CMD_SET_TRANSCEIVER_MODE, mode, 0, NULL, 0); if (ret) goto err; mutex_unlock(&dev->v4l2_lock); return 0; err: hackrf_kill_urbs(dev); hackrf_free_urbs(dev); 
hackrf_free_stream_bufs(dev); clear_bit(RX_ON, &dev->flags); clear_bit(TX_ON, &dev->flags); err_hackrf_return_all_buffers: hackrf_return_all_buffers(vq, VB2_BUF_STATE_QUEUED); mutex_unlock(&dev->v4l2_lock); dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static void hackrf_stop_streaming(struct vb2_queue *vq) { struct hackrf_dev *dev = vb2_get_drv_priv(vq); struct usb_interface *intf = dev->intf; dev_dbg(&intf->dev, "\n"); mutex_lock(&dev->v4l2_lock); /* stop hardware streaming */ hackrf_ctrl_msg(dev, CMD_SET_TRANSCEIVER_MODE, 0, 0, NULL, 0); hackrf_kill_urbs(dev); hackrf_free_urbs(dev); hackrf_free_stream_bufs(dev); hackrf_return_all_buffers(vq, VB2_BUF_STATE_ERROR); if (vq->type == V4L2_BUF_TYPE_SDR_CAPTURE) clear_bit(RX_ON, &dev->flags); else clear_bit(TX_ON, &dev->flags); mutex_unlock(&dev->v4l2_lock); } static const struct vb2_ops hackrf_vb2_ops = { .queue_setup = hackrf_queue_setup, .buf_queue = hackrf_buf_queue, .start_streaming = hackrf_start_streaming, .stop_streaming = hackrf_stop_streaming, }; static int hackrf_querycap(struct file *file, void *fh, struct v4l2_capability *cap) { struct hackrf_dev *dev = video_drvdata(file); struct usb_interface *intf = dev->intf; dev_dbg(&intf->dev, "\n"); cap->capabilities = V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER | V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS; strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver)); strscpy(cap->card, dev->rx_vdev.name, sizeof(cap->card)); usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info)); return 0; } static int hackrf_s_fmt_sdr(struct file *file, void *priv, struct v4l2_format *f) { struct hackrf_dev *dev = video_drvdata(file); struct video_device *vdev = video_devdata(file); struct vb2_queue *q; int i; dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n", (char *)&f->fmt.sdr.pixelformat); if (vdev->vfl_dir == VFL_DIR_RX) q = &dev->rx_vb2_queue; else q = &dev->tx_vb2_queue; if (vb2_is_busy(q)) return -EBUSY; for (i = 0; i < NUM_FORMATS; i++) { if (f->fmt.sdr.pixelformat == formats[i].pixelformat) { dev->pixelformat = formats[i].pixelformat; dev->buffersize = formats[i].buffersize; f->fmt.sdr.buffersize = formats[i].buffersize; return 0; } } dev->pixelformat = formats[0].pixelformat; dev->buffersize = formats[0].buffersize; f->fmt.sdr.pixelformat = formats[0].pixelformat; f->fmt.sdr.buffersize = formats[0].buffersize; return 0; } static int hackrf_g_fmt_sdr(struct file *file, void *priv, struct v4l2_format *f) { struct hackrf_dev *dev = video_drvdata(file); dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n", (char *)&dev->pixelformat); f->fmt.sdr.pixelformat = dev->pixelformat; f->fmt.sdr.buffersize = dev->buffersize; return 0; } static int hackrf_try_fmt_sdr(struct file *file, void *priv, struct v4l2_format *f) { struct hackrf_dev *dev = video_drvdata(file); int i; dev_dbg(dev->dev, "pixelformat fourcc %4.4s\n", (char *)&f->fmt.sdr.pixelformat); for (i = 0; i < NUM_FORMATS; i++) { if (formats[i].pixelformat == f->fmt.sdr.pixelformat) { f->fmt.sdr.buffersize = formats[i].buffersize; return 0; } } f->fmt.sdr.pixelformat = formats[0].pixelformat; f->fmt.sdr.buffersize = formats[0].buffersize; return 0; } static int hackrf_enum_fmt_sdr(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct hackrf_dev *dev = video_drvdata(file); dev_dbg(dev->dev, "index=%d\n", f->index); if (f->index >= NUM_FORMATS) return -EINVAL; f->pixelformat = formats[f->index].pixelformat; return 0; } static int hackrf_s_tuner(struct file *file, void *priv, const 
struct v4l2_tuner *v) { struct hackrf_dev *dev = video_drvdata(file); int ret; dev_dbg(dev->dev, "index=%d\n", v->index); if (v->index == 0) ret = 0; else if (v->index == 1) ret = 0; else ret = -EINVAL; return ret; } static int hackrf_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct hackrf_dev *dev = video_drvdata(file); int ret; dev_dbg(dev->dev, "index=%d\n", v->index); if (v->index == 0) { strscpy(v->name, "HackRF ADC", sizeof(v->name)); v->type = V4L2_TUNER_SDR; v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = bands_adc_dac[0].rangelow; v->rangehigh = bands_adc_dac[0].rangehigh; ret = 0; } else if (v->index == 1) { strscpy(v->name, "HackRF RF", sizeof(v->name)); v->type = V4L2_TUNER_RF; v->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; v->rangelow = bands_rx_tx[0].rangelow; v->rangehigh = bands_rx_tx[0].rangehigh; ret = 0; } else { ret = -EINVAL; } return ret; } static int hackrf_s_modulator(struct file *file, void *fh, const struct v4l2_modulator *a) { struct hackrf_dev *dev = video_drvdata(file); dev_dbg(dev->dev, "index=%d\n", a->index); return a->index > 1 ? -EINVAL : 0; } static int hackrf_g_modulator(struct file *file, void *fh, struct v4l2_modulator *a) { struct hackrf_dev *dev = video_drvdata(file); int ret; dev_dbg(dev->dev, "index=%d\n", a->index); if (a->index == 0) { strscpy(a->name, "HackRF DAC", sizeof(a->name)); a->type = V4L2_TUNER_SDR; a->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; a->rangelow = bands_adc_dac[0].rangelow; a->rangehigh = bands_adc_dac[0].rangehigh; ret = 0; } else if (a->index == 1) { strscpy(a->name, "HackRF RF", sizeof(a->name)); a->type = V4L2_TUNER_RF; a->capability = V4L2_TUNER_CAP_1HZ | V4L2_TUNER_CAP_FREQ_BANDS; a->rangelow = bands_rx_tx[0].rangelow; a->rangehigh = bands_rx_tx[0].rangehigh; ret = 0; } else { ret = -EINVAL; } return ret; } static int hackrf_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct hackrf_dev *dev = video_drvdata(file); struct usb_interface *intf = dev->intf; struct video_device *vdev = video_devdata(file); int ret; unsigned int uitmp; dev_dbg(&intf->dev, "tuner=%d type=%d frequency=%u\n", f->tuner, f->type, f->frequency); if (f->tuner == 0) { uitmp = clamp(f->frequency, bands_adc_dac[0].rangelow, bands_adc_dac[0].rangehigh); if (vdev->vfl_dir == VFL_DIR_RX) { dev->f_adc = uitmp; set_bit(RX_ADC_FREQUENCY, &dev->flags); } else { dev->f_dac = uitmp; set_bit(TX_DAC_FREQUENCY, &dev->flags); } } else if (f->tuner == 1) { uitmp = clamp(f->frequency, bands_rx_tx[0].rangelow, bands_rx_tx[0].rangehigh); if (vdev->vfl_dir == VFL_DIR_RX) { dev->f_rx = uitmp; set_bit(RX_RF_FREQUENCY, &dev->flags); } else { dev->f_tx = uitmp; set_bit(TX_RF_FREQUENCY, &dev->flags); } } else { ret = -EINVAL; goto err; } ret = hackrf_set_params(dev); if (ret) goto err; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int hackrf_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct hackrf_dev *dev = video_drvdata(file); struct usb_interface *intf = dev->intf; struct video_device *vdev = video_devdata(file); int ret; dev_dbg(dev->dev, "tuner=%d type=%d\n", f->tuner, f->type); if (f->tuner == 0) { f->type = V4L2_TUNER_SDR; if (vdev->vfl_dir == VFL_DIR_RX) f->frequency = dev->f_adc; else f->frequency = dev->f_dac; } else if (f->tuner == 1) { f->type = V4L2_TUNER_RF; if (vdev->vfl_dir == VFL_DIR_RX) f->frequency = dev->f_rx; else f->frequency = dev->f_tx; } else { ret = -EINVAL; goto err; } 
return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int hackrf_enum_freq_bands(struct file *file, void *priv, struct v4l2_frequency_band *band) { struct hackrf_dev *dev = video_drvdata(file); int ret; dev_dbg(dev->dev, "tuner=%d type=%d index=%d\n", band->tuner, band->type, band->index); if (band->tuner == 0) { if (band->index >= ARRAY_SIZE(bands_adc_dac)) { ret = -EINVAL; } else { *band = bands_adc_dac[band->index]; ret = 0; } } else if (band->tuner == 1) { if (band->index >= ARRAY_SIZE(bands_rx_tx)) { ret = -EINVAL; } else { *band = bands_rx_tx[band->index]; ret = 0; } } else { ret = -EINVAL; } return ret; } static const struct v4l2_ioctl_ops hackrf_ioctl_ops = { .vidioc_querycap = hackrf_querycap, .vidioc_s_fmt_sdr_cap = hackrf_s_fmt_sdr, .vidioc_g_fmt_sdr_cap = hackrf_g_fmt_sdr, .vidioc_enum_fmt_sdr_cap = hackrf_enum_fmt_sdr, .vidioc_try_fmt_sdr_cap = hackrf_try_fmt_sdr, .vidioc_s_fmt_sdr_out = hackrf_s_fmt_sdr, .vidioc_g_fmt_sdr_out = hackrf_g_fmt_sdr, .vidioc_enum_fmt_sdr_out = hackrf_enum_fmt_sdr, .vidioc_try_fmt_sdr_out = hackrf_try_fmt_sdr, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_create_bufs = vb2_ioctl_create_bufs, .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_expbuf = vb2_ioctl_expbuf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_s_tuner = hackrf_s_tuner, .vidioc_g_tuner = hackrf_g_tuner, .vidioc_s_modulator = hackrf_s_modulator, .vidioc_g_modulator = hackrf_g_modulator, .vidioc_s_frequency = hackrf_s_frequency, .vidioc_g_frequency = hackrf_g_frequency, .vidioc_enum_freq_bands = hackrf_enum_freq_bands, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, .vidioc_log_status = v4l2_ctrl_log_status, }; static const struct v4l2_file_operations hackrf_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .read = vb2_fop_read, .write = vb2_fop_write, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, .unlocked_ioctl = video_ioctl2, }; static const struct video_device hackrf_template = { .name = "HackRF One", .release = video_device_release_empty, .fops = &hackrf_fops, .ioctl_ops = &hackrf_ioctl_ops, }; static void hackrf_video_release(struct v4l2_device *v) { struct hackrf_dev *dev = container_of(v, struct hackrf_dev, v4l2_dev); dev_dbg(dev->dev, "\n"); v4l2_ctrl_handler_free(&dev->rx_ctrl_handler); v4l2_ctrl_handler_free(&dev->tx_ctrl_handler); v4l2_device_unregister(&dev->v4l2_dev); kfree(dev); } static int hackrf_s_ctrl_rx(struct v4l2_ctrl *ctrl) { struct hackrf_dev *dev = container_of(ctrl->handler, struct hackrf_dev, rx_ctrl_handler); struct usb_interface *intf = dev->intf; int ret; switch (ctrl->id) { case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: case V4L2_CID_RF_TUNER_BANDWIDTH: set_bit(RX_BANDWIDTH, &dev->flags); break; case V4L2_CID_RF_TUNER_RF_GAIN: set_bit(RX_RF_GAIN, &dev->flags); break; case V4L2_CID_RF_TUNER_LNA_GAIN: set_bit(RX_LNA_GAIN, &dev->flags); break; case V4L2_CID_RF_TUNER_IF_GAIN: set_bit(RX_IF_GAIN, &dev->flags); break; default: dev_dbg(&intf->dev, "unknown ctrl: id=%d name=%s\n", ctrl->id, ctrl->name); ret = -EINVAL; goto err; } ret = hackrf_set_params(dev); if (ret) goto err; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static int hackrf_s_ctrl_tx(struct v4l2_ctrl *ctrl) { struct hackrf_dev *dev = container_of(ctrl->handler, struct hackrf_dev, tx_ctrl_handler); struct usb_interface *intf = 
dev->intf; int ret; switch (ctrl->id) { case V4L2_CID_RF_TUNER_BANDWIDTH_AUTO: case V4L2_CID_RF_TUNER_BANDWIDTH: set_bit(TX_BANDWIDTH, &dev->flags); break; case V4L2_CID_RF_TUNER_LNA_GAIN: set_bit(TX_LNA_GAIN, &dev->flags); break; case V4L2_CID_RF_TUNER_RF_GAIN: set_bit(TX_RF_GAIN, &dev->flags); break; default: dev_dbg(&intf->dev, "unknown ctrl: id=%d name=%s\n", ctrl->id, ctrl->name); ret = -EINVAL; goto err; } ret = hackrf_set_params(dev); if (ret) goto err; return 0; err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } static const struct v4l2_ctrl_ops hackrf_ctrl_ops_rx = { .s_ctrl = hackrf_s_ctrl_rx, }; static const struct v4l2_ctrl_ops hackrf_ctrl_ops_tx = { .s_ctrl = hackrf_s_ctrl_tx, }; static int hackrf_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct hackrf_dev *dev; int ret; u8 u8tmp, buf[BUF_SIZE]; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto err; } mutex_init(&dev->v4l2_lock); mutex_init(&dev->vb_queue_lock); spin_lock_init(&dev->buffer_list_lock); INIT_LIST_HEAD(&dev->rx_buffer_list); INIT_LIST_HEAD(&dev->tx_buffer_list); dev->intf = intf; dev->dev = &intf->dev; dev->udev = interface_to_usbdev(intf); dev->pixelformat = formats[0].pixelformat; dev->buffersize = formats[0].buffersize; dev->f_adc = bands_adc_dac[0].rangelow; dev->f_dac = bands_adc_dac[0].rangelow; dev->f_rx = bands_rx_tx[0].rangelow; dev->f_tx = bands_rx_tx[0].rangelow; set_bit(RX_ADC_FREQUENCY, &dev->flags); set_bit(TX_DAC_FREQUENCY, &dev->flags); set_bit(RX_RF_FREQUENCY, &dev->flags); set_bit(TX_RF_FREQUENCY, &dev->flags); /* Detect device */ ret = hackrf_ctrl_msg(dev, CMD_BOARD_ID_READ, 0, 0, &u8tmp, 1); if (ret == 0) ret = hackrf_ctrl_msg(dev, CMD_VERSION_STRING_READ, 0, 0, buf, BUF_SIZE); if (ret) { dev_err(dev->dev, "Could not detect board\n"); goto err_kfree; } buf[BUF_SIZE - 1] = '\0'; dev_info(dev->dev, "Board ID: %02x\n", u8tmp); dev_info(dev->dev, "Firmware version: %s\n", buf); /* Init vb2 queue structure for receiver */ dev->rx_vb2_queue.type = V4L2_BUF_TYPE_SDR_CAPTURE; dev->rx_vb2_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; dev->rx_vb2_queue.ops = &hackrf_vb2_ops; dev->rx_vb2_queue.mem_ops = &vb2_vmalloc_memops; dev->rx_vb2_queue.drv_priv = dev; dev->rx_vb2_queue.buf_struct_size = sizeof(struct hackrf_buffer); dev->rx_vb2_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; dev->rx_vb2_queue.lock = &dev->vb_queue_lock; ret = vb2_queue_init(&dev->rx_vb2_queue); if (ret) { dev_err(dev->dev, "Could not initialize rx vb2 queue\n"); goto err_kfree; } /* Init vb2 queue structure for transmitter */ dev->tx_vb2_queue.type = V4L2_BUF_TYPE_SDR_OUTPUT; dev->tx_vb2_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_WRITE; dev->tx_vb2_queue.ops = &hackrf_vb2_ops; dev->tx_vb2_queue.mem_ops = &vb2_vmalloc_memops; dev->tx_vb2_queue.drv_priv = dev; dev->tx_vb2_queue.buf_struct_size = sizeof(struct hackrf_buffer); dev->tx_vb2_queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; dev->tx_vb2_queue.lock = &dev->vb_queue_lock; ret = vb2_queue_init(&dev->tx_vb2_queue); if (ret) { dev_err(dev->dev, "Could not initialize tx vb2 queue\n"); goto err_kfree; } /* Register controls for receiver */ v4l2_ctrl_handler_init(&dev->rx_ctrl_handler, 5); dev->rx_bandwidth_auto = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 0, 1); dev->rx_bandwidth = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_BANDWIDTH, 1750000, 28000000, 50000, 1750000); 
v4l2_ctrl_auto_cluster(2, &dev->rx_bandwidth_auto, 0, false); dev->rx_rf_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_RF_GAIN, 0, 12, 12, 0); dev->rx_lna_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 40, 8, 0); dev->rx_if_gain = v4l2_ctrl_new_std(&dev->rx_ctrl_handler, &hackrf_ctrl_ops_rx, V4L2_CID_RF_TUNER_IF_GAIN, 0, 62, 2, 0); if (dev->rx_ctrl_handler.error) { ret = dev->rx_ctrl_handler.error; dev_err(dev->dev, "Could not initialize controls\n"); goto err_v4l2_ctrl_handler_free_rx; } v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl); v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler); /* Register controls for transmitter */ v4l2_ctrl_handler_init(&dev->tx_ctrl_handler, 4); dev->tx_bandwidth_auto = v4l2_ctrl_new_std(&dev->tx_ctrl_handler, &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO, 0, 1, 0, 1); dev->tx_bandwidth = v4l2_ctrl_new_std(&dev->tx_ctrl_handler, &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_BANDWIDTH, 1750000, 28000000, 50000, 1750000); v4l2_ctrl_auto_cluster(2, &dev->tx_bandwidth_auto, 0, false); dev->tx_lna_gain = v4l2_ctrl_new_std(&dev->tx_ctrl_handler, &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_LNA_GAIN, 0, 47, 1, 0); dev->tx_rf_gain = v4l2_ctrl_new_std(&dev->tx_ctrl_handler, &hackrf_ctrl_ops_tx, V4L2_CID_RF_TUNER_RF_GAIN, 0, 15, 15, 0); if (dev->tx_ctrl_handler.error) { ret = dev->tx_ctrl_handler.error; dev_err(dev->dev, "Could not initialize controls\n"); goto err_v4l2_ctrl_handler_free_tx; } v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl); v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler); /* Register the v4l2_device structure */ dev->v4l2_dev.release = hackrf_video_release; ret = v4l2_device_register(&intf->dev, &dev->v4l2_dev); if (ret) { dev_err(dev->dev, "Failed to register v4l2-device (%d)\n", ret); goto err_v4l2_ctrl_handler_free_tx; } /* Init video_device structure for receiver */ dev->rx_vdev = hackrf_template; dev->rx_vdev.queue = &dev->rx_vb2_queue; dev->rx_vdev.v4l2_dev = &dev->v4l2_dev; dev->rx_vdev.ctrl_handler = &dev->rx_ctrl_handler; dev->rx_vdev.lock = &dev->v4l2_lock; dev->rx_vdev.vfl_dir = VFL_DIR_RX; dev->rx_vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_SDR_CAPTURE | V4L2_CAP_TUNER; video_set_drvdata(&dev->rx_vdev, dev); ret = video_register_device(&dev->rx_vdev, VFL_TYPE_SDR, -1); if (ret) { dev_err(dev->dev, "Failed to register as video device (%d)\n", ret); goto err_v4l2_device_unregister; } dev_info(dev->dev, "Registered as %s\n", video_device_node_name(&dev->rx_vdev)); /* Init video_device structure for transmitter */ dev->tx_vdev = hackrf_template; dev->tx_vdev.queue = &dev->tx_vb2_queue; dev->tx_vdev.v4l2_dev = &dev->v4l2_dev; dev->tx_vdev.ctrl_handler = &dev->tx_ctrl_handler; dev->tx_vdev.lock = &dev->v4l2_lock; dev->tx_vdev.vfl_dir = VFL_DIR_TX; dev->tx_vdev.device_caps = V4L2_CAP_STREAMING | V4L2_CAP_READWRITE | V4L2_CAP_SDR_OUTPUT | V4L2_CAP_MODULATOR; video_set_drvdata(&dev->tx_vdev, dev); ret = video_register_device(&dev->tx_vdev, VFL_TYPE_SDR, -1); if (ret) { dev_err(dev->dev, "Failed to register as video device (%d)\n", ret); goto err_video_unregister_device_rx; } dev_info(dev->dev, "Registered as %s\n", video_device_node_name(&dev->tx_vdev)); dev_notice(dev->dev, "SDR API is still slightly experimental and functionality changes may follow\n"); return 0; err_video_unregister_device_rx: video_unregister_device(&dev->rx_vdev); err_v4l2_device_unregister: v4l2_device_unregister(&dev->v4l2_dev); 
err_v4l2_ctrl_handler_free_tx: v4l2_ctrl_handler_free(&dev->tx_ctrl_handler); err_v4l2_ctrl_handler_free_rx: v4l2_ctrl_handler_free(&dev->rx_ctrl_handler); err_kfree: kfree(dev); err: dev_dbg(&intf->dev, "failed=%d\n", ret); return ret; } /* USB device ID list */ static const struct usb_device_id hackrf_id_table[] = { { USB_DEVICE(0x1d50, 0x6089) }, /* HackRF One */ { } }; MODULE_DEVICE_TABLE(usb, hackrf_id_table); /* USB subsystem interface */ static struct usb_driver hackrf_driver = { .name = KBUILD_MODNAME, .probe = hackrf_probe, .disconnect = hackrf_disconnect, .id_table = hackrf_id_table, }; module_usb_driver(hackrf_driver); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>"); MODULE_DESCRIPTION("HackRF"); MODULE_LICENSE("GPL"); |
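/*
 * Editor's illustrative sketch -- not part of the driver above. It shows how a
 * user-space program might exercise the SDR capture node registered by this
 * probe. The device path (/dev/swradio0) and the 100 MHz value (assuming 1 Hz
 * tuning units) are assumptions; tuner 1 is used because
 * hackrf_enum_freq_bands() exposes the ADC/DAC rate on tuner 0 and the RF
 * frequency on tuner 1.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	struct v4l2_frequency freq;
	int fd = open("/dev/swradio0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Confirm the node really is the expected SDR device. */
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver=%s card=%s\n", cap.driver, cap.card);

	/* Tune the RF front end (tuner 1) to 100 MHz. */
	memset(&freq, 0, sizeof(freq));
	freq.tuner = 1;
	freq.type = V4L2_TUNER_RF;
	freq.frequency = 100000000;
	if (ioctl(fd, VIDIOC_S_FREQUENCY, &freq))
		perror("VIDIOC_S_FREQUENCY");

	close(fd);
	return 0;
}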
| 1 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 | // SPDX-License-Identifier: GPL-2.0-or-later /* * DSA tagging protocol handling * * Copyright (c) 2008-2009 Marvell Semiconductor * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org> * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch> */ #include <linux/netdevice.h> #include <linux/ptp_classify.h> #include <linux/skbuff.h> #include <net/dsa.h> #include <net/dst_metadata.h> #include "tag.h" #include "user.h" static LIST_HEAD(dsa_tag_drivers_list); static DEFINE_MUTEX(dsa_tag_drivers_lock); /* Determine if we should defer delivery of skb until we have a rx timestamp. * * Called from dsa_switch_rcv. For now, this will only work if tagging is * enabled on the switch. Normally the MAC driver would retrieve the hardware * timestamp when it reads the packet out of the hardware. However in a DSA * switch, the DSA driver owning the interface to which the packet is * delivered is never notified unless we do so here. */ static bool dsa_skb_defer_rx_timestamp(struct dsa_user_priv *p, struct sk_buff *skb) { struct dsa_switch *ds = p->dp->ds; unsigned int type; if (!ds->ops->port_rxtstamp) return false; if (skb_headroom(skb) < ETH_HLEN) return false; __skb_push(skb, ETH_HLEN); type = ptp_classify_raw(skb); __skb_pull(skb, ETH_HLEN); if (type == PTP_CLASS_NONE) return false; return ds->ops->port_rxtstamp(ds, p->dp->index, skb, type); } static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *unused) { struct metadata_dst *md_dst = skb_metadata_dst(skb); struct dsa_port *cpu_dp = dev->dsa_ptr; struct sk_buff *nskb = NULL; struct dsa_user_priv *p; if (unlikely(!cpu_dp)) { kfree_skb(skb); return 0; } skb = skb_unshare(skb, GFP_ATOMIC); if (!skb) return 0; if (md_dst && md_dst->type == METADATA_HW_PORT_MUX) { unsigned int port = md_dst->u.port_info.port_id; skb_dst_drop(skb); if (!skb_has_extensions(skb)) skb->slow_gro = 0; skb->dev = dsa_conduit_find_user(dev, 0, port); if (likely(skb->dev)) { dsa_default_offload_fwd_mark(skb); nskb = skb; } } else { nskb = cpu_dp->rcv(skb, dev); } if (!nskb) { kfree_skb(skb); return 0; } skb = nskb; skb_push(skb, ETH_HLEN); skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); if (unlikely(!dsa_user_dev_check(skb->dev))) { /* Packet is to be injected directly on an upper * device, e.g. a team/bond, so skip all DSA-port * specific actions. 
*/ netif_rx(skb); return 0; } p = netdev_priv(skb->dev); if (unlikely(cpu_dp->ds->untag_bridge_pvid || cpu_dp->ds->untag_vlan_aware_bridge_pvid)) { nskb = dsa_software_vlan_untag(skb); if (!nskb) { kfree_skb(skb); return 0; } skb = nskb; } dev_sw_netstats_rx_add(skb->dev, skb->len + ETH_HLEN); if (dsa_skb_defer_rx_timestamp(p, skb)) return 0; gro_cells_receive(&p->gcells, skb); return 0; } struct packet_type dsa_pack_type __read_mostly = { .type = cpu_to_be16(ETH_P_XDSA), .func = dsa_switch_rcv, }; static void dsa_tag_driver_register(struct dsa_tag_driver *dsa_tag_driver, struct module *owner) { dsa_tag_driver->owner = owner; mutex_lock(&dsa_tag_drivers_lock); list_add_tail(&dsa_tag_driver->list, &dsa_tag_drivers_list); mutex_unlock(&dsa_tag_drivers_lock); } void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[], unsigned int count, struct module *owner) { unsigned int i; for (i = 0; i < count; i++) dsa_tag_driver_register(dsa_tag_driver_array[i], owner); } static void dsa_tag_driver_unregister(struct dsa_tag_driver *dsa_tag_driver) { mutex_lock(&dsa_tag_drivers_lock); list_del(&dsa_tag_driver->list); mutex_unlock(&dsa_tag_drivers_lock); } EXPORT_SYMBOL_GPL(dsa_tag_drivers_register); void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[], unsigned int count) { unsigned int i; for (i = 0; i < count; i++) dsa_tag_driver_unregister(dsa_tag_driver_array[i]); } EXPORT_SYMBOL_GPL(dsa_tag_drivers_unregister); const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops) { return ops->name; }; /* Function takes a reference on the module owning the tagger, * so dsa_tag_driver_put must be called afterwards. */ const struct dsa_device_ops *dsa_tag_driver_get_by_name(const char *name) { const struct dsa_device_ops *ops = ERR_PTR(-ENOPROTOOPT); struct dsa_tag_driver *dsa_tag_driver; request_module("%s%s", DSA_TAG_DRIVER_ALIAS, name); mutex_lock(&dsa_tag_drivers_lock); list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) { const struct dsa_device_ops *tmp = dsa_tag_driver->ops; if (strcmp(name, tmp->name)) continue; if (!try_module_get(dsa_tag_driver->owner)) break; ops = tmp; break; } mutex_unlock(&dsa_tag_drivers_lock); return ops; } const struct dsa_device_ops *dsa_tag_driver_get_by_id(int tag_protocol) { struct dsa_tag_driver *dsa_tag_driver; const struct dsa_device_ops *ops; bool found = false; request_module("%sid-%d", DSA_TAG_DRIVER_ALIAS, tag_protocol); mutex_lock(&dsa_tag_drivers_lock); list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) { ops = dsa_tag_driver->ops; if (ops->proto == tag_protocol) { found = true; break; } } if (found) { if (!try_module_get(dsa_tag_driver->owner)) ops = ERR_PTR(-ENOPROTOOPT); } else { ops = ERR_PTR(-ENOPROTOOPT); } mutex_unlock(&dsa_tag_drivers_lock); return ops; } void dsa_tag_driver_put(const struct dsa_device_ops *ops) { struct dsa_tag_driver *dsa_tag_driver; mutex_lock(&dsa_tag_drivers_lock); list_for_each_entry(dsa_tag_driver, &dsa_tag_drivers_list, list) { if (dsa_tag_driver->ops == ops) { module_put(dsa_tag_driver->owner); break; } } mutex_unlock(&dsa_tag_drivers_lock); } |
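/*
 * Editor's illustrative sketch -- not part of the original file. It shows the
 * shape of a tagging-protocol module that the registration helpers above
 * manage. The "example" name, the DSA_TAG_PROTO_NONE protocol id and the
 * empty xmit/rcv bodies are placeholders; real taggers in net/dsa/tag_*.c
 * insert and strip a switch-specific header at these two hooks.
 */
#include <net/dsa.h>

#include "tag.h"

static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	/* A real tagger pushes its switch header in front of the frame. */
	return skb;
}

static struct sk_buff *example_tag_rcv(struct sk_buff *skb,
				       struct net_device *dev)
{
	/*
	 * A real tagger parses its header, points skb->dev at the user port
	 * (e.g. via dsa_conduit_find_user()) and strips the tag. Returning
	 * NULL tells dsa_switch_rcv() to drop the frame.
	 */
	return NULL;
}

static const struct dsa_device_ops example_tag_ops = {
	.name	= "example",
	.proto	= DSA_TAG_PROTO_NONE,	/* placeholder protocol id */
	.xmit	= example_tag_xmit,
	.rcv	= example_tag_rcv,
};

module_dsa_tag_driver(example_tag_ops);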
| 27 28 15 25 41 41 31 18 18 10 10 10 10 20 20 20 20 8 19 19 19 19 7 7 6 7 7 11 11 10 11 1 75 75 10 10 6 1 5 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 | // SPDX-License-Identifier: GPL-2.0-or-later /* * net/core/dst_cache.c - dst entry cache * * Copyright (c) 2016 Paolo Abeni <pabeni@redhat.com> */ #include <linux/kernel.h> #include <linux/percpu.h> #include <net/dst_cache.h> #include <net/route.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ip6_fib.h> #endif #include <uapi/linux/in.h> struct dst_cache_pcpu { unsigned long refresh_ts; struct dst_entry *dst; local_lock_t bh_lock; u32 cookie; union { struct in_addr in_saddr; struct in6_addr in6_saddr; }; }; static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache, struct dst_entry *dst, u32 cookie) { DEBUG_NET_WARN_ON_ONCE(!in_softirq()); dst_release(dst_cache->dst); if (dst) dst_hold(dst); dst_cache->cookie = cookie; dst_cache->dst = dst; } static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache, struct dst_cache_pcpu *idst) { struct dst_entry *dst; DEBUG_NET_WARN_ON_ONCE(!in_softirq()); dst = idst->dst; if (!dst) goto fail; /* the cache already hold a dst reference; it can't go away */ dst_hold(dst); if (unlikely(!time_after(idst->refresh_ts, READ_ONCE(dst_cache->reset_ts)) || (READ_ONCE(dst->obsolete) && !dst->ops->check(dst, idst->cookie)))) { dst_cache_per_cpu_dst_set(idst, NULL, 0); dst_release(dst); goto fail; } return dst; fail: idst->refresh_ts = jiffies; return NULL; } struct dst_entry *dst_cache_get(struct dst_cache *dst_cache) { struct dst_entry *dst; if (!dst_cache->cache) return NULL; local_lock_nested_bh(&dst_cache->cache->bh_lock); dst = dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache)); local_unlock_nested_bh(&dst_cache->cache->bh_lock); return dst; } EXPORT_SYMBOL_GPL(dst_cache_get); struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr) { struct dst_cache_pcpu *idst; struct dst_entry *dst; if (!dst_cache->cache) return NULL; local_lock_nested_bh(&dst_cache->cache->bh_lock); idst = this_cpu_ptr(dst_cache->cache); dst = dst_cache_per_cpu_get(dst_cache, idst); if (!dst) { local_unlock_nested_bh(&dst_cache->cache->bh_lock); return NULL; } *saddr = idst->in_saddr.s_addr; local_unlock_nested_bh(&dst_cache->cache->bh_lock); return dst_rtable(dst); } EXPORT_SYMBOL_GPL(dst_cache_get_ip4); void dst_cache_set_ip4(struct dst_cache *dst_cache, struct dst_entry *dst, __be32 saddr) { struct dst_cache_pcpu *idst; if (!dst_cache->cache) return; local_lock_nested_bh(&dst_cache->cache->bh_lock); idst = this_cpu_ptr(dst_cache->cache); dst_cache_per_cpu_dst_set(idst, dst, 0); idst->in_saddr.s_addr = saddr; local_unlock_nested_bh(&dst_cache->cache->bh_lock); } EXPORT_SYMBOL_GPL(dst_cache_set_ip4); #if IS_ENABLED(CONFIG_IPV6) void dst_cache_set_ip6(struct dst_cache *dst_cache, struct dst_entry *dst, 
const struct in6_addr *saddr) { struct dst_cache_pcpu *idst; if (!dst_cache->cache) return; local_lock_nested_bh(&dst_cache->cache->bh_lock); idst = this_cpu_ptr(dst_cache->cache); dst_cache_per_cpu_dst_set(idst, dst, rt6_get_cookie(dst_rt6_info(dst))); idst->in6_saddr = *saddr; local_unlock_nested_bh(&dst_cache->cache->bh_lock); } EXPORT_SYMBOL_GPL(dst_cache_set_ip6); struct dst_entry *dst_cache_get_ip6(struct dst_cache *dst_cache, struct in6_addr *saddr) { struct dst_cache_pcpu *idst; struct dst_entry *dst; if (!dst_cache->cache) return NULL; local_lock_nested_bh(&dst_cache->cache->bh_lock); idst = this_cpu_ptr(dst_cache->cache); dst = dst_cache_per_cpu_get(dst_cache, idst); if (!dst) { local_unlock_nested_bh(&dst_cache->cache->bh_lock); return NULL; } *saddr = idst->in6_saddr; local_unlock_nested_bh(&dst_cache->cache->bh_lock); return dst; } EXPORT_SYMBOL_GPL(dst_cache_get_ip6); #endif int dst_cache_init(struct dst_cache *dst_cache, gfp_t gfp) { unsigned int i; dst_cache->cache = alloc_percpu_gfp(struct dst_cache_pcpu, gfp | __GFP_ZERO); if (!dst_cache->cache) return -ENOMEM; for_each_possible_cpu(i) local_lock_init(&per_cpu_ptr(dst_cache->cache, i)->bh_lock); dst_cache_reset(dst_cache); return 0; } EXPORT_SYMBOL_GPL(dst_cache_init); void dst_cache_destroy(struct dst_cache *dst_cache) { int i; if (!dst_cache->cache) return; for_each_possible_cpu(i) dst_release(per_cpu_ptr(dst_cache->cache, i)->dst); free_percpu(dst_cache->cache); } EXPORT_SYMBOL_GPL(dst_cache_destroy); void dst_cache_reset_now(struct dst_cache *dst_cache) { int i; if (!dst_cache->cache) return; dst_cache_reset(dst_cache); for_each_possible_cpu(i) { struct dst_cache_pcpu *idst = per_cpu_ptr(dst_cache->cache, i); struct dst_entry *dst = idst->dst; idst->cookie = 0; idst->dst = NULL; dst_release(dst); } } EXPORT_SYMBOL_GPL(dst_cache_reset_now); |
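/*
 * Editor's illustrative sketch -- not part of the original file. It mirrors
 * the way tunnel drivers typically use this API on their transmit path:
 * dst_cache_get_ip4() is the fast path, and a full route lookup refills the
 * cache on a miss. The function name and the minimal flowi4 setup are
 * hypothetical.
 */
#include <linux/err.h>
#include <linux/string.h>
#include <net/dst_cache.h>
#include <net/route.h>

static struct rtable *example_get_route(struct net *net,
					struct dst_cache *cache,
					__be32 daddr, __be32 *saddr)
{
	struct flowi4 fl4;
	struct rtable *rt;

	/* Fast path: per-CPU cached route, also returning the cached saddr. */
	rt = dst_cache_get_ip4(cache, saddr);
	if (rt)
		return rt;

	/* Slow path: full FIB lookup, then remember the result. */
	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return rt;

	*saddr = fl4.saddr;
	dst_cache_set_ip4(cache, &rt->dst, fl4.saddr);
	return rt;
}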
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/syscalls.h>
#include <linux/time_namespace.h>

#include "futex.h"

/*
 * Support for robust futexes: the kernel cleans up held futexes at
 * thread exit time.
 *
 * Implementation: user-space maintains a per-thread list of locks it
 * is holding. Upon do_exit(), the kernel carefully walks this list,
 * and marks all locks that are owned by this thread with the
 * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
 * always manipulated with the lock held, so the list is private and
 * per-thread. Userspace also maintains a per-thread 'list_op_pending'
 * field, to allow the kernel to clean up if the thread dies after
 * acquiring the lock, but just before it could have added itself to
 * the list. There can only be one such pending lock.
*/ /** * sys_set_robust_list() - Set the robust-futex list head of a task * @head: pointer to the list-head * @len: length of the list-head, as userspace expects */ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head, size_t, len) { /* * The kernel knows only one size for now: */ if (unlikely(len != sizeof(*head))) return -EINVAL; current->robust_list = head; return 0; } /** * sys_get_robust_list() - Get the robust-futex list head of a task * @pid: pid of the process [zero for current task] * @head_ptr: pointer to a list-head pointer, the kernel fills it in * @len_ptr: pointer to a length field, the kernel fills in the header size */ SYSCALL_DEFINE3(get_robust_list, int, pid, struct robust_list_head __user * __user *, head_ptr, size_t __user *, len_ptr) { struct robust_list_head __user *head; unsigned long ret; struct task_struct *p; rcu_read_lock(); ret = -ESRCH; if (!pid) p = current; else { p = find_task_by_vpid(pid); if (!p) goto err_unlock; } ret = -EPERM; if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) goto err_unlock; head = p->robust_list; rcu_read_unlock(); if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(head, head_ptr); err_unlock: rcu_read_unlock(); return ret; } long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, u32 __user *uaddr2, u32 val2, u32 val3) { unsigned int flags = futex_to_flags(op); int cmd = op & FUTEX_CMD_MASK; if (flags & FLAGS_CLOCKRT) { if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI && cmd != FUTEX_LOCK_PI2) return -ENOSYS; } switch (cmd) { case FUTEX_WAIT: val3 = FUTEX_BITSET_MATCH_ANY; fallthrough; case FUTEX_WAIT_BITSET: return futex_wait(uaddr, flags, val, timeout, val3); case FUTEX_WAKE: val3 = FUTEX_BITSET_MATCH_ANY; fallthrough; case FUTEX_WAKE_BITSET: return futex_wake(uaddr, flags, val, val3); case FUTEX_REQUEUE: return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, NULL, 0); case FUTEX_CMP_REQUEUE: return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, &val3, 0); case FUTEX_WAKE_OP: return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); case FUTEX_LOCK_PI: flags |= FLAGS_CLOCKRT; fallthrough; case FUTEX_LOCK_PI2: return futex_lock_pi(uaddr, flags, timeout, 0); case FUTEX_UNLOCK_PI: return futex_unlock_pi(uaddr, flags); case FUTEX_TRYLOCK_PI: return futex_lock_pi(uaddr, flags, NULL, 1); case FUTEX_WAIT_REQUEUE_PI: val3 = FUTEX_BITSET_MATCH_ANY; return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3, uaddr2); case FUTEX_CMP_REQUEUE_PI: return futex_requeue(uaddr, flags, uaddr2, flags, val, val2, &val3, 1); } return -ENOSYS; } static __always_inline bool futex_cmd_has_timeout(u32 cmd) { switch (cmd) { case FUTEX_WAIT: case FUTEX_LOCK_PI: case FUTEX_LOCK_PI2: case FUTEX_WAIT_BITSET: case FUTEX_WAIT_REQUEUE_PI: return true; } return false; } static __always_inline int futex_init_timeout(u32 cmd, u32 op, struct timespec64 *ts, ktime_t *t) { if (!timespec64_valid(ts)) return -EINVAL; *t = timespec64_to_ktime(*ts); if (cmd == FUTEX_WAIT) *t = ktime_add_safe(ktime_get(), *t); else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME)) *t = timens_ktime_to_host(CLOCK_MONOTONIC, *t); return 0; } SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, const struct __kernel_timespec __user *, utime, u32 __user *, uaddr2, u32, val3) { int ret, cmd = op & FUTEX_CMD_MASK; ktime_t t, *tp = NULL; struct timespec64 ts; if (utime && futex_cmd_has_timeout(cmd)) { if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) return -EFAULT; if 
(get_timespec64(&ts, utime)) return -EFAULT; ret = futex_init_timeout(cmd, op, &ts, &t); if (ret) return ret; tp = &t; } return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); } /** * futex_parse_waitv - Parse a waitv array from userspace * @futexv: Kernel side list of waiters to be filled * @uwaitv: Userspace list to be parsed * @nr_futexes: Length of futexv * @wake: Wake to call when futex is woken * @wake_data: Data for the wake handler * * Return: Error code on failure, 0 on success */ int futex_parse_waitv(struct futex_vector *futexv, struct futex_waitv __user *uwaitv, unsigned int nr_futexes, futex_wake_fn *wake, void *wake_data) { struct futex_waitv aux; unsigned int i; for (i = 0; i < nr_futexes; i++) { unsigned int flags; if (copy_from_user(&aux, &uwaitv[i], sizeof(aux))) return -EFAULT; if ((aux.flags & ~FUTEX2_VALID_MASK) || aux.__reserved) return -EINVAL; flags = futex2_to_flags(aux.flags); if (!futex_flags_valid(flags)) return -EINVAL; if (!futex_validate_input(flags, aux.val)) return -EINVAL; futexv[i].w.flags = flags; futexv[i].w.val = aux.val; futexv[i].w.uaddr = aux.uaddr; futexv[i].q = futex_q_init; futexv[i].q.wake = wake; futexv[i].q.wake_data = wake_data; } return 0; } static int futex2_setup_timeout(struct __kernel_timespec __user *timeout, clockid_t clockid, struct hrtimer_sleeper *to) { int flag_clkid = 0, flag_init = 0; struct timespec64 ts; ktime_t time; int ret; if (!timeout) return 0; if (clockid == CLOCK_REALTIME) { flag_clkid = FLAGS_CLOCKRT; flag_init = FUTEX_CLOCK_REALTIME; } if (clockid != CLOCK_REALTIME && clockid != CLOCK_MONOTONIC) return -EINVAL; if (get_timespec64(&ts, timeout)) return -EFAULT; /* * Since there's no opcode for futex_waitv, use * FUTEX_WAIT_BITSET that uses absolute timeout as well */ ret = futex_init_timeout(FUTEX_WAIT_BITSET, flag_init, &ts, &time); if (ret) return ret; futex_setup_timer(&time, to, flag_clkid, 0); return 0; } static inline void futex2_destroy_timeout(struct hrtimer_sleeper *to) { hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); } /** * sys_futex_waitv - Wait on a list of futexes * @waiters: List of futexes to wait on * @nr_futexes: Length of futexv * @flags: Flag for timeout (monotonic/realtime) * @timeout: Optional absolute timeout. * @clockid: Clock to be used for the timeout, realtime or monotonic. * * Given an array of `struct futex_waitv`, wait on each uaddr. The thread wakes * if a futex_wake() is performed at any uaddr. The syscall returns immediately * if any waiter has *uaddr != val. *timeout is an optional timeout value for * the operation. Each waiter has individual flags. The `flags` argument for * the syscall should be used solely for specifying the timeout as realtime, if * needed. Flags for private futexes, sizes, etc. should be used on the * individual flags of each waiter. * * Returns the array index of one of the woken futexes. No further information * is provided: any number of other futexes may also have been woken by the * same event, and if more than one futex was woken, the retrned index may * refer to any one of them. (It is not necessaryily the futex with the * smallest index, nor the one most recently woken, nor...) 
*/ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters, unsigned int, nr_futexes, unsigned int, flags, struct __kernel_timespec __user *, timeout, clockid_t, clockid) { struct hrtimer_sleeper to; struct futex_vector *futexv; int ret; /* This syscall supports no flags for now */ if (flags) return -EINVAL; if (!nr_futexes || nr_futexes > FUTEX_WAITV_MAX || !waiters) return -EINVAL; if (timeout && (ret = futex2_setup_timeout(timeout, clockid, &to))) return ret; futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL); if (!futexv) { ret = -ENOMEM; goto destroy_timer; } ret = futex_parse_waitv(futexv, waiters, nr_futexes, futex_wake_mark, NULL); if (!ret) ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL); kfree(futexv); destroy_timer: if (timeout) futex2_destroy_timeout(&to); return ret; } /* * sys_futex_wake - Wake a number of futexes * @uaddr: Address of the futex(es) to wake * @mask: bitmask * @nr: Number of the futexes to wake * @flags: FUTEX2 flags * * Identical to the traditional FUTEX_WAKE_BITSET op, except it is part of the * futex2 family of calls. */ SYSCALL_DEFINE4(futex_wake, void __user *, uaddr, unsigned long, mask, int, nr, unsigned int, flags) { if (flags & ~FUTEX2_VALID_MASK) return -EINVAL; flags = futex2_to_flags(flags); if (!futex_flags_valid(flags)) return -EINVAL; if (!futex_validate_input(flags, mask)) return -EINVAL; return futex_wake(uaddr, FLAGS_STRICT | flags, nr, mask); } /* * sys_futex_wait - Wait on a futex * @uaddr: Address of the futex to wait on * @val: Value of @uaddr * @mask: bitmask * @flags: FUTEX2 flags * @timeout: Optional absolute timeout * @clockid: Clock to be used for the timeout, realtime or monotonic * * Identical to the traditional FUTEX_WAIT_BITSET op, except it is part of the * futex2 familiy of calls. */ SYSCALL_DEFINE6(futex_wait, void __user *, uaddr, unsigned long, val, unsigned long, mask, unsigned int, flags, struct __kernel_timespec __user *, timeout, clockid_t, clockid) { struct hrtimer_sleeper to; int ret; if (flags & ~FUTEX2_VALID_MASK) return -EINVAL; flags = futex2_to_flags(flags); if (!futex_flags_valid(flags)) return -EINVAL; if (!futex_validate_input(flags, val) || !futex_validate_input(flags, mask)) return -EINVAL; if (timeout && (ret = futex2_setup_timeout(timeout, clockid, &to))) return ret; ret = __futex_wait(uaddr, flags, val, timeout ? &to : NULL, mask); if (timeout) futex2_destroy_timeout(&to); return ret; } /* * sys_futex_requeue - Requeue a waiter from one futex to another * @waiters: array describing the source and destination futex * @flags: unused * @nr_wake: number of futexes to wake * @nr_requeue: number of futexes to requeue * * Identical to the traditional FUTEX_CMP_REQUEUE op, except it is part of the * futex2 family of calls. 
*/ SYSCALL_DEFINE4(futex_requeue, struct futex_waitv __user *, waiters, unsigned int, flags, int, nr_wake, int, nr_requeue) { struct futex_vector futexes[2]; u32 cmpval; int ret; if (flags) return -EINVAL; if (!waiters) return -EINVAL; ret = futex_parse_waitv(futexes, waiters, 2, futex_wake_mark, NULL); if (ret) return ret; cmpval = futexes[0].w.val; return futex_requeue(u64_to_user_ptr(futexes[0].w.uaddr), futexes[0].w.flags, u64_to_user_ptr(futexes[1].w.uaddr), futexes[1].w.flags, nr_wake, nr_requeue, &cmpval, 0); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(set_robust_list, struct compat_robust_list_head __user *, head, compat_size_t, len) { if (unlikely(len != sizeof(*head))) return -EINVAL; current->compat_robust_list = head; return 0; } COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid, compat_uptr_t __user *, head_ptr, compat_size_t __user *, len_ptr) { struct compat_robust_list_head __user *head; unsigned long ret; struct task_struct *p; rcu_read_lock(); ret = -ESRCH; if (!pid) p = current; else { p = find_task_by_vpid(pid); if (!p) goto err_unlock; } ret = -EPERM; if (!ptrace_may_access(p, PTRACE_MODE_READ_REALCREDS)) goto err_unlock; head = p->compat_robust_list; rcu_read_unlock(); if (put_user(sizeof(*head), len_ptr)) return -EFAULT; return put_user(ptr_to_compat(head), head_ptr); err_unlock: rcu_read_unlock(); return ret; } #endif /* CONFIG_COMPAT */ #ifdef CONFIG_COMPAT_32BIT_TIME SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, const struct old_timespec32 __user *, utime, u32 __user *, uaddr2, u32, val3) { int ret, cmd = op & FUTEX_CMD_MASK; ktime_t t, *tp = NULL; struct timespec64 ts; if (utime && futex_cmd_has_timeout(cmd)) { if (get_old_timespec32(&ts, utime)) return -EFAULT; ret = futex_init_timeout(cmd, op, &ts, &t); if (ret) return ret; tp = &t; } return do_futex(uaddr, op, val, tp, uaddr2, (unsigned long)utime, val3); } #endif /* CONFIG_COMPAT_32BIT_TIME */ |
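/*
 * Editor's illustrative sketch -- not part of the original file. A minimal
 * user-space example of the classic FUTEX_WAIT/FUTEX_WAKE pair handled by
 * do_futex() above; glibc provides no wrapper, so the raw syscall is used.
 * Because the futex word (0) does not match the expected value (1), the wait
 * returns immediately with errno set to EAGAIN instead of blocking.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t futex_word;

int main(void)
{
	/* Kernel compares *uaddr (0) with val (1); they differ, so EAGAIN. */
	long ret = syscall(SYS_futex, &futex_word, FUTEX_WAIT, 1, NULL, NULL, 0);
	printf("FUTEX_WAIT: ret=%ld errno=%d\n", ret, errno);

	/* Wake at most one waiter queued on the word (none here). */
	ret = syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
	printf("FUTEX_WAKE: woke %ld waiters\n", ret);
	return 0;
}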
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * ALSA sequencer Memory Manager
 * Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 */
#ifndef __SND_SEQ_MEMORYMGR_H
#define __SND_SEQ_MEMORYMGR_H

#include <sound/seq_kernel.h>
#include <linux/poll.h>

struct snd_info_buffer;

/* aliasing for legacy and UMP event packet handling */
union __snd_seq_event {
	struct snd_seq_event legacy;
#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
	struct snd_seq_ump_event ump;
#endif
	struct {
		struct snd_seq_event event;
#if IS_ENABLED(CONFIG_SND_SEQ_UMP)
		u32 extra;
#endif
	} __packed raw;
};

/* container for sequencer event (internal use) */
struct snd_seq_event_cell {
	union {
		struct snd_seq_event event;
		union __snd_seq_event ump;
	};
	struct snd_seq_pool *pool;		/* used pool */
	struct snd_seq_event_cell *next;	/* next cell */
};

/* design note: the pool is a contiguous block of memory; if we ever want to
   dynamically add cells, they are better kept in a separate pool, because we
   need to know the base address of the pool when releasing memory. */
struct snd_seq_pool {
	struct snd_seq_event_cell *ptr;		/* pointer to first event chunk */
	struct snd_seq_event_cell *free;	/* pointer to the head of the free list */

	int total_elements;	/* pool size actually allocated */
	atomic_t counter;	/* cells free */

	int size;		/* pool size to be allocated */
	int room;		/* watermark for sleep/wakeup */

	int closing;

	/* statistics */
	int max_used;
	int event_alloc_nopool;
	int event_alloc_failures;
	int event_alloc_success;

	/* Write locking */
	wait_queue_head_t output_sleep;

	/* Pool lock */
	spinlock_t lock;
};

void snd_seq_cell_free(struct snd_seq_event_cell *cell);

int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file, struct mutex *mutexp);

/* return number of unused (free) cells */
static inline int snd_seq_unused_cells(struct snd_seq_pool *pool)
{
	return pool ? pool->total_elements - atomic_read(&pool->counter) : 0;
}

/* return total number of allocated cells */
static inline int snd_seq_total_cells(struct snd_seq_pool *pool)
{
	return pool ? pool->total_elements : 0;
}

/* init pool - allocate events */
int snd_seq_pool_init(struct snd_seq_pool *pool);

/* done pool - free events */
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
int snd_seq_pool_done(struct snd_seq_pool *pool);

/* create pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize);

/* remove pool */
int snd_seq_pool_delete(struct snd_seq_pool **pool);

/* polling */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait);

void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space);

#endif
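/*
 * Editor's illustrative sketch -- not part of the original header. It strings
 * together the pool lifecycle declared above (new -> init -> query -> mark
 * closing -> done -> delete). The function name, the 500-event size and the
 * "seq_memory.h" include name are assumptions for illustration.
 */
#include <linux/errno.h>
#include <linux/printk.h>

#include "seq_memory.h"	/* assumed name of the header above */

static int example_pool_lifecycle(void)
{
	struct snd_seq_pool *pool;
	int err;

	/* Create the pool object; cells are not allocated yet. */
	pool = snd_seq_pool_new(500);
	if (!pool)
		return -ENOMEM;

	/* Allocate the event cells and build the free list. */
	err = snd_seq_pool_init(pool);
	if (err < 0)
		goto out;

	pr_info("free cells: %d of %d\n",
		snd_seq_unused_cells(pool), snd_seq_total_cells(pool));

	/* Refuse further allocations, then release the cells. */
	snd_seq_pool_mark_closing(pool);
	snd_seq_pool_done(pool);
out:
	snd_seq_pool_delete(&pool);
	return err;
}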
/*
 * algif_rng: User-space interface for random number generators
 *
 * This file provides the user-space API for random number generators.
 *
 * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL2
 * are required INSTEAD OF the above restrictions. (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
*/ #include <linux/capability.h> #include <linux/module.h> #include <crypto/rng.h> #include <linux/random.h> #include <crypto/if_alg.h> #include <linux/net.h> #include <net/sock.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>"); MODULE_DESCRIPTION("User-space interface for random number generators"); struct rng_ctx { #define MAXSIZE 128 unsigned int len; struct crypto_rng *drng; u8 *addtl; size_t addtl_len; }; struct rng_parent_ctx { struct crypto_rng *drng; u8 *entropy; }; static void rng_reset_addtl(struct rng_ctx *ctx) { kfree_sensitive(ctx->addtl); ctx->addtl = NULL; ctx->addtl_len = 0; } static int _rng_recvmsg(struct crypto_rng *drng, struct msghdr *msg, size_t len, u8 *addtl, size_t addtl_len) { int err = 0; int genlen = 0; u8 result[MAXSIZE]; if (len == 0) return 0; if (len > MAXSIZE) len = MAXSIZE; /* * although not strictly needed, this is a precaution against coding * errors */ memset(result, 0, len); /* * The enforcement of a proper seeding of an RNG is done within an * RNG implementation. Some RNGs (DRBG, krng) do not need specific * seeding as they automatically seed. The X9.31 DRNG will return * an error if it was not seeded properly. */ genlen = crypto_rng_generate(drng, addtl, addtl_len, result, len); if (genlen < 0) return genlen; err = memcpy_to_msg(msg, result, len); memzero_explicit(result, len); return err ? err : len; } static int rng_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct rng_ctx *ctx = ask->private; return _rng_recvmsg(ctx->drng, msg, len, NULL, 0); } static int rng_test_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, int flags) { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); struct rng_ctx *ctx = ask->private; int ret; lock_sock(sock->sk); ret = _rng_recvmsg(ctx->drng, msg, len, ctx->addtl, ctx->addtl_len); rng_reset_addtl(ctx); release_sock(sock->sk); return ret; } static int rng_test_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { int err; struct alg_sock *ask = alg_sk(sock->sk); struct rng_ctx *ctx = ask->private; lock_sock(sock->sk); if (len > MAXSIZE) { err = -EMSGSIZE; goto unlock; } rng_reset_addtl(ctx); ctx->addtl = kmalloc(len, GFP_KERNEL); if (!ctx->addtl) { err = -ENOMEM; goto unlock; } err = memcpy_from_msg(ctx->addtl, msg, len); if (err) { rng_reset_addtl(ctx); goto unlock; } ctx->addtl_len = len; unlock: release_sock(sock->sk); return err ? 
err : len; } static struct proto_ops algif_rng_ops = { .family = PF_ALG, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .getname = sock_no_getname, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, .sendmsg = sock_no_sendmsg, .release = af_alg_release, .recvmsg = rng_recvmsg, }; static struct proto_ops __maybe_unused algif_rng_test_ops = { .family = PF_ALG, .connect = sock_no_connect, .socketpair = sock_no_socketpair, .getname = sock_no_getname, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .mmap = sock_no_mmap, .bind = sock_no_bind, .accept = sock_no_accept, .release = af_alg_release, .recvmsg = rng_test_recvmsg, .sendmsg = rng_test_sendmsg, }; static void *rng_bind(const char *name, u32 type, u32 mask) { struct rng_parent_ctx *pctx; struct crypto_rng *rng; pctx = kzalloc(sizeof(*pctx), GFP_KERNEL); if (!pctx) return ERR_PTR(-ENOMEM); rng = crypto_alloc_rng(name, type, mask); if (IS_ERR(rng)) { kfree(pctx); return ERR_CAST(rng); } pctx->drng = rng; return pctx; } static void rng_release(void *private) { struct rng_parent_ctx *pctx = private; if (unlikely(!pctx)) return; crypto_free_rng(pctx->drng); kfree_sensitive(pctx->entropy); kfree_sensitive(pctx); } static void rng_sock_destruct(struct sock *sk) { struct alg_sock *ask = alg_sk(sk); struct rng_ctx *ctx = ask->private; rng_reset_addtl(ctx); sock_kfree_s(sk, ctx, ctx->len); af_alg_release_parent(sk); } static int rng_accept_parent(void *private, struct sock *sk) { struct rng_ctx *ctx; struct rng_parent_ctx *pctx = private; struct alg_sock *ask = alg_sk(sk); unsigned int len = sizeof(*ctx); ctx = sock_kmalloc(sk, len, GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->len = len; ctx->addtl = NULL; ctx->addtl_len = 0; /* * No seeding done at that point -- if multiple accepts are * done on one RNG instance, each resulting FD points to the same * state of the RNG. */ ctx->drng = pctx->drng; ask->private = ctx; sk->sk_destruct = rng_sock_destruct; /* * Non NULL pctx->entropy means that CAVP test has been initiated on * this socket, replace proto_ops algif_rng_ops with algif_rng_test_ops. */ if (IS_ENABLED(CONFIG_CRYPTO_USER_API_RNG_CAVP) && pctx->entropy) sk->sk_socket->ops = &algif_rng_test_ops; return 0; } static int rng_setkey(void *private, const u8 *seed, unsigned int seedlen) { struct rng_parent_ctx *pctx = private; /* * Check whether seedlen is of sufficient size is done in RNG * implementations. */ return crypto_rng_reset(pctx->drng, seed, seedlen); } static int __maybe_unused rng_setentropy(void *private, sockptr_t entropy, unsigned int len) { struct rng_parent_ctx *pctx = private; u8 *kentropy = NULL; if (!capable(CAP_SYS_ADMIN)) return -EACCES; if (pctx->entropy) return -EINVAL; if (len > MAXSIZE) return -EMSGSIZE; if (len) { kentropy = memdup_sockptr(entropy, len); if (IS_ERR(kentropy)) return PTR_ERR(kentropy); } crypto_rng_alg(pctx->drng)->set_ent(pctx->drng, kentropy, len); /* * Since rng doesn't perform any memory management for the entropy * buffer, save kentropy pointer to pctx now to free it after use. 
*/ pctx->entropy = kentropy; return 0; } static const struct af_alg_type algif_type_rng = { .bind = rng_bind, .release = rng_release, .accept = rng_accept_parent, .setkey = rng_setkey, #ifdef CONFIG_CRYPTO_USER_API_RNG_CAVP .setentropy = rng_setentropy, #endif .ops = &algif_rng_ops, .name = "rng", .owner = THIS_MODULE }; static int __init rng_init(void) { return af_alg_register_type(&algif_type_rng); } static void __exit rng_exit(void) { int err = af_alg_unregister_type(&algif_type_rng); BUG_ON(err); } module_init(rng_init); module_exit(rng_exit); |
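/*
 * Editor's illustrative sketch -- not part of the original file. A minimal
 * user-space consumer of this interface: bind an AF_ALG "rng" socket, accept
 * an operation fd (handled by rng_accept_parent() above), and read() bytes
 * generated via rng_recvmsg(). The "stdrng" algorithm name is an assumption;
 * any registered RNG name works.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "rng",
		.salg_name = "stdrng",
	};
	unsigned char buf[16];
	int tfmfd, opfd;
	ssize_t n;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("bind");
		return 1;
	}

	opfd = accept(tfmfd, NULL, 0);
	if (opfd < 0) {
		perror("accept");
		return 1;
	}

	n = read(opfd, buf, sizeof(buf));	/* capped at MAXSIZE (128) bytes */
	printf("got %zd random bytes\n", n);

	close(opfd);
	close(tfmfd);
	return 0;
}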
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Christian Brauner <brauner@kernel.org> */

#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/seq_file.h>

#include "internal.h"

/*
 * Outside of this file vfs{g,u}id_t are always created from k{g,u}id_t,
 * never from raw values. These are just internal helpers.
 */
#define VFSUIDT_INIT_RAW(val) (vfsuid_t){ val }
#define VFSGIDT_INIT_RAW(val) (vfsgid_t){ val }

struct mnt_idmap {
	struct uid_gid_map uid_map;
	struct uid_gid_map gid_map;
	refcount_t count;
};

/*
 * Carries the initial idmapping of 0:0:4294967295 which is an identity
 * mapping. This means that {g,u}id 0 is mapped to {g,u}id 0, {g,u}id 1 is
 * mapped to {g,u}id 1, [...], {g,u}id 1000 to {g,u}id 1000, [...].
 */
struct mnt_idmap nop_mnt_idmap = {
	.count	= REFCOUNT_INIT(1),
};
EXPORT_SYMBOL_GPL(nop_mnt_idmap);

/*
 * Carries the invalid idmapping of a full 0-4294967295 {g,u}id range.
 * This means that all {g,u}ids are mapped to INVALID_VFS{G,U}ID.
 */
struct mnt_idmap invalid_mnt_idmap = {
	.count	= REFCOUNT_INIT(1),
};
EXPORT_SYMBOL_GPL(invalid_mnt_idmap);

/**
 * initial_idmapping - check whether this is the initial mapping
 * @ns: idmapping to check
 *
 * Check whether this is the initial mapping, mapping 0 to 0, 1 to 1,
 * [...], 1000 to 1000 [...].
 *
 * Return: true if this is the initial mapping, false if not.
 */
static inline bool initial_idmapping(const struct user_namespace *ns)
{
	return ns == &init_user_ns;
}

/**
 * make_vfsuid - map a filesystem kuid according to an idmapping
 * @idmap: the mount's idmapping
 * @fs_userns: the filesystem's idmapping
 * @kuid : kuid to be mapped
 *
 * Take a @kuid and remap it from @fs_userns into @idmap. Use this
 * function when preparing a @kuid to be reported to userspace.
 *
 * If initial_idmapping() determines that this is not an idmapped mount
 * we can simply return @kuid unchanged.
* If initial_idmapping() tells us that the filesystem is not mounted with an * idmapping we know the value of @kuid won't change when calling * from_kuid() so we can simply retrieve the value via __kuid_val() * directly. * * Return: @kuid mapped according to @idmap. * If @kuid has no mapping in either @idmap or @fs_userns INVALID_UID is * returned. */ vfsuid_t make_vfsuid(struct mnt_idmap *idmap, struct user_namespace *fs_userns, kuid_t kuid) { uid_t uid; if (idmap == &nop_mnt_idmap) return VFSUIDT_INIT(kuid); if (idmap == &invalid_mnt_idmap) return INVALID_VFSUID; if (initial_idmapping(fs_userns)) uid = __kuid_val(kuid); else uid = from_kuid(fs_userns, kuid); if (uid == (uid_t)-1) return INVALID_VFSUID; return VFSUIDT_INIT_RAW(map_id_down(&idmap->uid_map, uid)); } EXPORT_SYMBOL_GPL(make_vfsuid); /** * make_vfsgid - map a filesystem kgid according to an idmapping * @idmap: the mount's idmapping * @fs_userns: the filesystem's idmapping * @kgid : kgid to be mapped * * Take a @kgid and remap it from @fs_userns into @idmap. Use this * function when preparing a @kgid to be reported to userspace. * * If initial_idmapping() determines that this is not an idmapped mount * we can simply return @kgid unchanged. * If initial_idmapping() tells us that the filesystem is not mounted with an * idmapping we know the value of @kgid won't change when calling * from_kgid() so we can simply retrieve the value via __kgid_val() * directly. * * Return: @kgid mapped according to @idmap. * If @kgid has no mapping in either @idmap or @fs_userns INVALID_GID is * returned. */ vfsgid_t make_vfsgid(struct mnt_idmap *idmap, struct user_namespace *fs_userns, kgid_t kgid) { gid_t gid; if (idmap == &nop_mnt_idmap) return VFSGIDT_INIT(kgid); if (idmap == &invalid_mnt_idmap) return INVALID_VFSGID; if (initial_idmapping(fs_userns)) gid = __kgid_val(kgid); else gid = from_kgid(fs_userns, kgid); if (gid == (gid_t)-1) return INVALID_VFSGID; return VFSGIDT_INIT_RAW(map_id_down(&idmap->gid_map, gid)); } EXPORT_SYMBOL_GPL(make_vfsgid); /** * from_vfsuid - map a vfsuid into the filesystem idmapping * @idmap: the mount's idmapping * @fs_userns: the filesystem's idmapping * @vfsuid : vfsuid to be mapped * * Map @vfsuid into the filesystem idmapping. This function has to be used in * order to e.g. write @vfsuid to inode->i_uid. * * Return: @vfsuid mapped into the filesystem idmapping */ kuid_t from_vfsuid(struct mnt_idmap *idmap, struct user_namespace *fs_userns, vfsuid_t vfsuid) { uid_t uid; if (idmap == &nop_mnt_idmap) return AS_KUIDT(vfsuid); if (idmap == &invalid_mnt_idmap) return INVALID_UID; uid = map_id_up(&idmap->uid_map, __vfsuid_val(vfsuid)); if (uid == (uid_t)-1) return INVALID_UID; if (initial_idmapping(fs_userns)) return KUIDT_INIT(uid); return make_kuid(fs_userns, uid); } EXPORT_SYMBOL_GPL(from_vfsuid); /** * from_vfsgid - map a vfsgid into the filesystem idmapping * @idmap: the mount's idmapping * @fs_userns: the filesystem's idmapping * @vfsgid : vfsgid to be mapped * * Map @vfsgid into the filesystem idmapping. This function has to be used in * order to e.g. write @vfsgid to inode->i_gid. 
* * Return: @vfsgid mapped into the filesystem idmapping */ kgid_t from_vfsgid(struct mnt_idmap *idmap, struct user_namespace *fs_userns, vfsgid_t vfsgid) { gid_t gid; if (idmap == &nop_mnt_idmap) return AS_KGIDT(vfsgid); if (idmap == &invalid_mnt_idmap) return INVALID_GID; gid = map_id_up(&idmap->gid_map, __vfsgid_val(vfsgid)); if (gid == (gid_t)-1) return INVALID_GID; if (initial_idmapping(fs_userns)) return KGIDT_INIT(gid); return make_kgid(fs_userns, gid); } EXPORT_SYMBOL_GPL(from_vfsgid); #ifdef CONFIG_MULTIUSER /** * vfsgid_in_group_p() - check whether a vfsuid matches the caller's groups * @vfsgid: the mnt gid to match * * This function can be used to determine whether @vfsuid matches any of the * caller's groups. * * Return: 1 if vfsuid matches caller's groups, 0 if not. */ int vfsgid_in_group_p(vfsgid_t vfsgid) { return in_group_p(AS_KGIDT(vfsgid)); } #else int vfsgid_in_group_p(vfsgid_t vfsgid) { return 1; } #endif EXPORT_SYMBOL_GPL(vfsgid_in_group_p); static int copy_mnt_idmap(struct uid_gid_map *map_from, struct uid_gid_map *map_to) { struct uid_gid_extent *forward, *reverse; u32 nr_extents = READ_ONCE(map_from->nr_extents); /* Pairs with smp_wmb() when writing the idmapping. */ smp_rmb(); /* * Don't blindly copy @map_to into @map_from if nr_extents is * smaller or equal to UID_GID_MAP_MAX_BASE_EXTENTS. Since we * read @nr_extents someone could have written an idmapping and * then we might end up with inconsistent data. So just don't do * anything at all. */ if (nr_extents == 0) return -EINVAL; /* * Here we know that nr_extents is greater than zero which means * a map has been written. Since idmappings can't be changed * once they have been written we know that we can safely copy * from @map_to into @map_from. */ if (nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) { *map_to = *map_from; return 0; } forward = kmemdup_array(map_from->forward, nr_extents, sizeof(struct uid_gid_extent), GFP_KERNEL_ACCOUNT); if (!forward) return -ENOMEM; reverse = kmemdup_array(map_from->reverse, nr_extents, sizeof(struct uid_gid_extent), GFP_KERNEL_ACCOUNT); if (!reverse) { kfree(forward); return -ENOMEM; } /* * The idmapping isn't exposed anywhere so we don't need to care * about ordering between extent pointers and @nr_extents * initialization. */ map_to->forward = forward; map_to->reverse = reverse; map_to->nr_extents = nr_extents; return 0; } static void free_mnt_idmap(struct mnt_idmap *idmap) { if (idmap->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) { kfree(idmap->uid_map.forward); kfree(idmap->uid_map.reverse); } if (idmap->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) { kfree(idmap->gid_map.forward); kfree(idmap->gid_map.reverse); } kfree(idmap); } struct mnt_idmap *alloc_mnt_idmap(struct user_namespace *mnt_userns) { struct mnt_idmap *idmap; int ret; idmap = kzalloc(sizeof(struct mnt_idmap), GFP_KERNEL_ACCOUNT); if (!idmap) return ERR_PTR(-ENOMEM); refcount_set(&idmap->count, 1); ret = copy_mnt_idmap(&mnt_userns->uid_map, &idmap->uid_map); if (!ret) ret = copy_mnt_idmap(&mnt_userns->gid_map, &idmap->gid_map); if (ret) { free_mnt_idmap(idmap); idmap = ERR_PTR(ret); } return idmap; } /** * mnt_idmap_get - get a reference to an idmapping * @idmap: the idmap to bump the reference on * * If @idmap is not the @nop_mnt_idmap bump the reference count. * * Return: @idmap with reference count bumped if @not_mnt_idmap isn't passed. 
 */
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
{
	if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap)
		refcount_inc(&idmap->count);

	return idmap;
}
EXPORT_SYMBOL_GPL(mnt_idmap_get);

/**
 * mnt_idmap_put - put a reference to an idmapping
 * @idmap: the idmap to put the reference on
 *
 * If this is a non-initial idmapping, put the reference count when a mount is
 * released and free it if we're the last user.
 */
void mnt_idmap_put(struct mnt_idmap *idmap)
{
	if (idmap != &nop_mnt_idmap && idmap != &invalid_mnt_idmap &&
	    refcount_dec_and_test(&idmap->count))
		free_mnt_idmap(idmap);
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);

int statmount_mnt_idmap(struct mnt_idmap *idmap, struct seq_file *seq, bool uid_map)
{
	struct uid_gid_map *map, *map_up;
	u32 idx, nr_mappings;

	if (!is_valid_mnt_idmap(idmap))
		return 0;

	/*
	 * Idmappings are shown relative to the caller's idmapping.
	 * This is both the most intuitive and most useful solution.
	 */
	if (uid_map) {
		map = &idmap->uid_map;
		map_up = &current_user_ns()->uid_map;
	} else {
		map = &idmap->gid_map;
		map_up = &current_user_ns()->gid_map;
	}

	for (idx = 0, nr_mappings = 0; idx < map->nr_extents; idx++) {
		uid_t lower;
		struct uid_gid_extent *extent;

		if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			extent = &map->extent[idx];
		else
			extent = &map->forward[idx];

		/*
		 * Verify that the whole range of the mapping can be
		 * resolved in the caller's idmapping. If it cannot be
		 * resolved skip the mapping.
		 */
		lower = map_id_range_up(map_up, extent->lower_first, extent->count);
		if (lower == (uid_t) -1)
			continue;

		seq_printf(seq, "%u %u %u", extent->first, lower, extent->count);

		seq->count++; /* mappings are separated by \0 */
		if (seq_has_overflowed(seq))
			return -EAGAIN;

		nr_mappings++;
	}

	return nr_mappings;
}
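/*
 * Editor's illustrative sketch -- not part of the original file. It shows the
 * typical direction of the helpers above when reporting ownership to user
 * space: the raw inode owner is first mapped through the mount idmapping with
 * make_vfsuid() and only then translated into the caller's user namespace.
 * The helper name is hypothetical; in-tree code follows this pattern via
 * i_uid_into_vfsuid() and friends.
 */
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mnt_idmapping.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

static uid_t example_owner_for_userspace(struct mnt_idmap *idmap,
					 const struct inode *inode)
{
	/* Remap the filesystem kuid according to the mount's idmapping. */
	vfsuid_t vfsuid = make_vfsuid(idmap, inode->i_sb->s_user_ns,
				      inode->i_uid);

	/* Then express the result in the caller's user namespace. */
	return from_kuid_munged(current_user_ns(), vfsuid_into_kuid(vfsuid));
}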
// SPDX-License-Identifier: GPL-2.0-or-later /* * Memory-to-memory device framework for Video for Linux 2 and vb2. * * Helper functions for devices that use vb2 buffers for both their * source and destination. * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * Pawel Osciak, <pawel@osciak.com> * Marek Szyprowski, <m.szyprowski@samsung.com> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <media/media-device.h> #include <media/videobuf2-v4l2.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #include <media/v4l2-fh.h> #include <media/v4l2-event.h> MODULE_DESCRIPTION("Mem to mem device framework for vb2"); MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>"); MODULE_LICENSE("GPL"); static bool debug; module_param(debug, bool, 0644); #define dprintk(fmt, arg...) \ do { \ if (debug) \ printk(KERN_DEBUG "%s: " fmt, __func__, ## arg);\ } while (0) /* Instance is already queued on the job_queue */ #define TRANS_QUEUED (1 << 0) /* Instance is currently running in hardware */ #define TRANS_RUNNING (1 << 1) /* Instance is currently aborting */ #define TRANS_ABORT (1 << 2) /* The job queue is not running new jobs */ #define QUEUE_PAUSED (1 << 0) /* Offset base for buffers on the destination queue - used to distinguish * between source and destination buffers when mmapping - they receive the same * offsets but for different queues */ #define DST_QUEUE_OFF_BASE (1 << 30) enum v4l2_m2m_entity_type { MEM2MEM_ENT_TYPE_SOURCE, MEM2MEM_ENT_TYPE_SINK, MEM2MEM_ENT_TYPE_PROC }; static const char * const m2m_entity_name[] = { "source", "sink", "proc" }; /** * struct v4l2_m2m_dev - per-device context * @source: &struct media_entity pointer with the source entity * Used only when the M2M device is registered via * v4l2_m2m_register_media_controller(). * @source_pad: &struct media_pad with the source pad. * Used only when the M2M device is registered via * v4l2_m2m_register_media_controller(). * @sink: &struct media_entity pointer with the sink entity * Used only when the M2M device is registered via * v4l2_m2m_register_media_controller(). * @sink_pad: &struct media_pad with the sink pad. * Used only when the M2M device is registered via * v4l2_m2m_register_media_controller(). * @proc: &struct media_entity pointer with the M2M device itself. * @proc_pads: &struct media_pad with the @proc pads. * Used only when the M2M device is registered via * v4l2_m2m_register_media_controller(). * @intf_devnode: &struct media_intf devnode pointer with the interface * which controls the M2M device. * @curr_ctx: currently running instance * @job_queue: instances queued to run * @job_spinlock: protects job_queue * @job_work: worker to run queued jobs. * @job_queue_flags: flags of the queue status, %QUEUE_PAUSED.
* @m2m_ops: driver callbacks */ struct v4l2_m2m_dev { struct v4l2_m2m_ctx *curr_ctx; #ifdef CONFIG_MEDIA_CONTROLLER struct media_entity *source; struct media_pad source_pad; struct media_entity sink; struct media_pad sink_pad; struct media_entity proc; struct media_pad proc_pads[2]; struct media_intf_devnode *intf_devnode; #endif struct list_head job_queue; spinlock_t job_spinlock; struct work_struct job_work; unsigned long job_queue_flags; const struct v4l2_m2m_ops *m2m_ops; }; static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type) { if (V4L2_TYPE_IS_OUTPUT(type)) return &m2m_ctx->out_q_ctx; else return &m2m_ctx->cap_q_ctx; } struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type) { struct v4l2_m2m_queue_ctx *q_ctx; q_ctx = get_queue_ctx(m2m_ctx, type); if (!q_ctx) return NULL; return &q_ctx->q; } EXPORT_SYMBOL(v4l2_m2m_get_vq); struct vb2_v4l2_buffer *v4l2_m2m_next_buf(struct v4l2_m2m_queue_ctx *q_ctx) { struct v4l2_m2m_buffer *b; unsigned long flags; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); if (list_empty(&q_ctx->rdy_queue)) { spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); return NULL; } b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); return &b->vb; } EXPORT_SYMBOL_GPL(v4l2_m2m_next_buf); struct vb2_v4l2_buffer *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx) { struct v4l2_m2m_buffer *b; unsigned long flags; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); if (list_empty(&q_ctx->rdy_queue)) { spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); return NULL; } b = list_last_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); return &b->vb; } EXPORT_SYMBOL_GPL(v4l2_m2m_last_buf); struct vb2_v4l2_buffer *v4l2_m2m_buf_remove(struct v4l2_m2m_queue_ctx *q_ctx) { struct v4l2_m2m_buffer *b; unsigned long flags; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); if (list_empty(&q_ctx->rdy_queue)) { spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); return NULL; } b = list_first_entry(&q_ctx->rdy_queue, struct v4l2_m2m_buffer, list); list_del(&b->list); q_ctx->num_rdy--; spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); return &b->vb; } EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove); void v4l2_m2m_buf_remove_by_buf(struct v4l2_m2m_queue_ctx *q_ctx, struct vb2_v4l2_buffer *vbuf) { struct v4l2_m2m_buffer *b; unsigned long flags; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); b = container_of(vbuf, struct v4l2_m2m_buffer, vb); list_del(&b->list); q_ctx->num_rdy--; spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); } EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_buf); struct vb2_v4l2_buffer * v4l2_m2m_buf_remove_by_idx(struct v4l2_m2m_queue_ctx *q_ctx, unsigned int idx) { struct v4l2_m2m_buffer *b, *tmp; struct vb2_v4l2_buffer *ret = NULL; unsigned long flags; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); list_for_each_entry_safe(b, tmp, &q_ctx->rdy_queue, list) { if (b->vb.vb2_buf.index == idx) { list_del(&b->list); q_ctx->num_rdy--; ret = &b->vb; break; } } spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); return ret; } EXPORT_SYMBOL_GPL(v4l2_m2m_buf_remove_by_idx); /* * Scheduling handlers */ void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev) { unsigned long flags; void *ret = NULL; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); if (m2m_dev->curr_ctx) ret = m2m_dev->curr_ctx->priv; spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); return ret; } 
EXPORT_SYMBOL(v4l2_m2m_get_curr_priv); /** * v4l2_m2m_try_run() - select next job to perform and run it if possible * @m2m_dev: per-device context * * Get next transaction (if present) from the waiting jobs list and run it. * * Note that this function can run on a given v4l2_m2m_ctx context, * but call .device_run for another context. */ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) { unsigned long flags; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); if (NULL != m2m_dev->curr_ctx) { spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); dprintk("Another instance is running, won't run now\n"); return; } if (list_empty(&m2m_dev->job_queue)) { spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); dprintk("No job pending\n"); return; } if (m2m_dev->job_queue_flags & QUEUE_PAUSED) { spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); dprintk("Running new jobs is paused\n"); return; } m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, struct v4l2_m2m_ctx, queue); m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); dprintk("Running job on m2m_ctx: %p\n", m2m_dev->curr_ctx); m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); } /* * __v4l2_m2m_try_queue() - queue a job * @m2m_dev: m2m device * @m2m_ctx: m2m context * * Check if this context is ready to queue a job. * * This function can run in interrupt context. */ static void __v4l2_m2m_try_queue(struct v4l2_m2m_dev *m2m_dev, struct v4l2_m2m_ctx *m2m_ctx) { unsigned long flags_job; struct vb2_v4l2_buffer *dst, *src; dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); if (!m2m_ctx->out_q_ctx.q.streaming || (!m2m_ctx->cap_q_ctx.q.streaming && !m2m_ctx->ignore_cap_streaming)) { if (!m2m_ctx->ignore_cap_streaming) dprintk("Streaming needs to be on for both queues\n"); else dprintk("Streaming needs to be on for the OUTPUT queue\n"); return; } spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); /* If the context is aborted then don't schedule it */ if (m2m_ctx->job_flags & TRANS_ABORT) { dprintk("Aborted context\n"); goto job_unlock; } if (m2m_ctx->job_flags & TRANS_QUEUED) { dprintk("On job queue already\n"); goto job_unlock; } src = v4l2_m2m_next_src_buf(m2m_ctx); dst = v4l2_m2m_next_dst_buf(m2m_ctx); if (!src && !m2m_ctx->out_q_ctx.buffered) { dprintk("No input buffers available\n"); goto job_unlock; } if (!dst && !m2m_ctx->cap_q_ctx.buffered) { dprintk("No output buffers available\n"); goto job_unlock; } m2m_ctx->new_frame = true; if (src && dst && dst->is_held && dst->vb2_buf.copied_timestamp && dst->vb2_buf.timestamp != src->vb2_buf.timestamp) { dprintk("Timestamp mismatch, returning held capture buffer\n"); dst->is_held = false; v4l2_m2m_dst_buf_remove(m2m_ctx); v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE); dst = v4l2_m2m_next_dst_buf(m2m_ctx); if (!dst && !m2m_ctx->cap_q_ctx.buffered) { dprintk("No output buffers available after returning held buffer\n"); goto job_unlock; } } if (src && dst && (m2m_ctx->out_q_ctx.q.subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)) m2m_ctx->new_frame = !dst->vb2_buf.copied_timestamp || dst->vb2_buf.timestamp != src->vb2_buf.timestamp; if (m2m_ctx->has_stopped) { dprintk("Device has stopped\n"); goto job_unlock; } if (m2m_dev->m2m_ops->job_ready && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { dprintk("Driver not ready\n"); goto job_unlock; } list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); m2m_ctx->job_flags |= TRANS_QUEUED; job_unlock: spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); } /** * 
v4l2_m2m_try_schedule() - schedule and possibly run a job for any context * @m2m_ctx: m2m context * * Check if this context is ready to queue a job. If suitable, * run the next queued job on the mem2mem device. * * This function shouldn't run in interrupt context. * * Note that v4l2_m2m_try_schedule() can schedule one job for this context, * and then run another job for another context. */ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) { struct v4l2_m2m_dev *m2m_dev = m2m_ctx->m2m_dev; __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); v4l2_m2m_try_run(m2m_dev); } EXPORT_SYMBOL_GPL(v4l2_m2m_try_schedule); /** * v4l2_m2m_device_run_work() - run pending jobs for the context * @work: Work structure used for scheduling the execution of this function. */ static void v4l2_m2m_device_run_work(struct work_struct *work) { struct v4l2_m2m_dev *m2m_dev = container_of(work, struct v4l2_m2m_dev, job_work); v4l2_m2m_try_run(m2m_dev); } /** * v4l2_m2m_cancel_job() - cancel pending jobs for the context * @m2m_ctx: m2m context with jobs to be canceled * * In case of streamoff or release called on any context, * 1] If the context is currently running, then abort job will be called * 2] If the context is queued, then the context will be removed from * the job_queue */ static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) { struct v4l2_m2m_dev *m2m_dev; unsigned long flags; m2m_dev = m2m_ctx->m2m_dev; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); m2m_ctx->job_flags |= TRANS_ABORT; if (m2m_ctx->job_flags & TRANS_RUNNING) { spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); if (m2m_dev->m2m_ops->job_abort) m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx); wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING)); } else if (m2m_ctx->job_flags & TRANS_QUEUED) { list_del(&m2m_ctx->queue); m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); dprintk("m2m_ctx: %p had been on queue and was removed\n", m2m_ctx); } else { /* Do nothing, was not on queue/running */ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); } } /* * Schedule the next job, called from v4l2_m2m_job_finish() or * v4l2_m2m_buf_done_and_job_finish(). */ static void v4l2_m2m_schedule_next_job(struct v4l2_m2m_dev *m2m_dev, struct v4l2_m2m_ctx *m2m_ctx) { /* * This instance might have more buffers ready, but since we do not * allow more than one job on the job_queue per instance, each has * to be scheduled separately after the previous one finishes. */ __v4l2_m2m_try_queue(m2m_dev, m2m_ctx); /* * We might be running in atomic context, * but the job must be run in non-atomic context. */ schedule_work(&m2m_dev->job_work); } /* * Assumes job_spinlock is held, called from v4l2_m2m_job_finish() or * v4l2_m2m_buf_done_and_job_finish(). */ static bool _v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, struct v4l2_m2m_ctx *m2m_ctx) { if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { dprintk("Called by an instance not currently running\n"); return false; } list_del(&m2m_dev->curr_ctx->queue); m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); wake_up(&m2m_dev->curr_ctx->finished); m2m_dev->curr_ctx = NULL; return true; } void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, struct v4l2_m2m_ctx *m2m_ctx) { unsigned long flags; bool schedule_next; /* * This function should not be used for drivers that support * holding capture buffers. Those should use * v4l2_m2m_buf_done_and_job_finish() instead. 
*/ WARN_ON(m2m_ctx->out_q_ctx.q.subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF); spin_lock_irqsave(&m2m_dev->job_spinlock, flags); schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); if (schedule_next) v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); } EXPORT_SYMBOL(v4l2_m2m_job_finish); void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev, struct v4l2_m2m_ctx *m2m_ctx, enum vb2_buffer_state state) { struct vb2_v4l2_buffer *src_buf, *dst_buf; bool schedule_next = false; unsigned long flags; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); src_buf = v4l2_m2m_src_buf_remove(m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(m2m_ctx); if (WARN_ON(!src_buf || !dst_buf)) goto unlock; dst_buf->is_held = src_buf->flags & V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; if (!dst_buf->is_held) { v4l2_m2m_dst_buf_remove(m2m_ctx); v4l2_m2m_buf_done(dst_buf, state); } /* * If the request API is being used, returning the OUTPUT * (src) buffer will wake-up any process waiting on the * request file descriptor. * * Therefore, return the CAPTURE (dst) buffer first, * to avoid signalling the request file descriptor * before the CAPTURE buffer is done. */ v4l2_m2m_buf_done(src_buf, state); schedule_next = _v4l2_m2m_job_finish(m2m_dev, m2m_ctx); unlock: spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); if (schedule_next) v4l2_m2m_schedule_next_job(m2m_dev, m2m_ctx); } EXPORT_SYMBOL(v4l2_m2m_buf_done_and_job_finish); void v4l2_m2m_suspend(struct v4l2_m2m_dev *m2m_dev) { unsigned long flags; struct v4l2_m2m_ctx *curr_ctx; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); m2m_dev->job_queue_flags |= QUEUE_PAUSED; curr_ctx = m2m_dev->curr_ctx; spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); if (curr_ctx) wait_event(curr_ctx->finished, !(curr_ctx->job_flags & TRANS_RUNNING)); } EXPORT_SYMBOL(v4l2_m2m_suspend); void v4l2_m2m_resume(struct v4l2_m2m_dev *m2m_dev) { unsigned long flags; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); m2m_dev->job_queue_flags &= ~QUEUE_PAUSED; spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); v4l2_m2m_try_run(m2m_dev); } EXPORT_SYMBOL(v4l2_m2m_resume); int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_requestbuffers *reqbufs) { struct vb2_queue *vq; int ret; vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); ret = vb2_reqbufs(vq, reqbufs); /* If count == 0, then the owner has released all buffers and he is no longer owner of the queue. Otherwise we have an owner. */ if (ret == 0) vq->owner = reqbufs->count ? file->private_data : NULL; return ret; } EXPORT_SYMBOL_GPL(v4l2_m2m_reqbufs); static void v4l2_m2m_adjust_mem_offset(struct vb2_queue *vq, struct v4l2_buffer *buf) { /* Adjust MMAP memory offsets for the CAPTURE queue */ if (buf->memory == V4L2_MEMORY_MMAP && V4L2_TYPE_IS_CAPTURE(vq->type)) { if (V4L2_TYPE_IS_MULTIPLANAR(vq->type)) { unsigned int i; for (i = 0; i < buf->length; ++i) buf->m.planes[i].m.mem_offset += DST_QUEUE_OFF_BASE; } else { buf->m.offset += DST_QUEUE_OFF_BASE; } } } int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { struct vb2_queue *vq; int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); ret = vb2_querybuf(vq, buf); if (ret) return ret; /* Adjust MMAP memory offsets for the CAPTURE queue */ v4l2_m2m_adjust_mem_offset(vq, buf); return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_querybuf); /* * This will add the LAST flag and mark the buffer management * state as stopped. 
* This is called when the last capture buffer must be flagged as LAST * in draining mode from the encoder/decoder driver buf_queue() callback * or from v4l2_update_last_buf_state() when a capture buffer is available. */ void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_v4l2_buffer *vbuf) { vbuf->flags |= V4L2_BUF_FLAG_LAST; vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); v4l2_m2m_mark_stopped(m2m_ctx); } EXPORT_SYMBOL_GPL(v4l2_m2m_last_buffer_done); /* When stop command is issued, update buffer management state */ static int v4l2_update_last_buf_state(struct v4l2_m2m_ctx *m2m_ctx) { struct vb2_v4l2_buffer *next_dst_buf; if (m2m_ctx->is_draining) return -EBUSY; if (m2m_ctx->has_stopped) return 0; m2m_ctx->last_src_buf = v4l2_m2m_last_src_buf(m2m_ctx); m2m_ctx->is_draining = true; /* * The processing of the last output buffer queued before * the STOP command is expected to mark the buffer management * state as stopped with v4l2_m2m_mark_stopped(). */ if (m2m_ctx->last_src_buf) return 0; /* * In case the output queue is empty, try to mark the last capture * buffer as LAST. */ next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); if (!next_dst_buf) { /* * Wait for the next queued one in encoder/decoder driver * buf_queue() callback using the v4l2_m2m_dst_buf_is_last() * helper or in v4l2_m2m_qbuf() if encoder/decoder is not yet * streaming. */ m2m_ctx->next_buf_last = true; return 0; } v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf); return 0; } /* * Updates the encoding/decoding buffer management state, should * be called from encoder/decoder drivers start_streaming() */ void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_queue *q) { /* If start streaming again, untag the last output buffer */ if (V4L2_TYPE_IS_OUTPUT(q->type)) m2m_ctx->last_src_buf = NULL; } EXPORT_SYMBOL_GPL(v4l2_m2m_update_start_streaming_state); /* * Updates the encoding/decoding buffer management state, should * be called from encoder/decoder driver stop_streaming() */ void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_queue *q) { if (V4L2_TYPE_IS_OUTPUT(q->type)) { /* * If in draining state, either mark next dst buffer as * done or flag next one to be marked as done either * in encoder/decoder driver buf_queue() callback using * the v4l2_m2m_dst_buf_is_last() helper or in v4l2_m2m_qbuf() * if encoder/decoder is not yet streaming */ if (m2m_ctx->is_draining) { struct vb2_v4l2_buffer *next_dst_buf; m2m_ctx->last_src_buf = NULL; next_dst_buf = v4l2_m2m_dst_buf_remove(m2m_ctx); if (!next_dst_buf) m2m_ctx->next_buf_last = true; else v4l2_m2m_last_buffer_done(m2m_ctx, next_dst_buf); } } else { v4l2_m2m_clear_state(m2m_ctx); } } EXPORT_SYMBOL_GPL(v4l2_m2m_update_stop_streaming_state); static void v4l2_m2m_force_last_buf_done(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_queue *q) { struct vb2_buffer *vb; struct vb2_v4l2_buffer *vbuf; unsigned int i; if (WARN_ON(q->is_output)) return; if (list_empty(&q->queued_list)) return; vb = list_first_entry(&q->queued_list, struct vb2_buffer, queued_entry); for (i = 0; i < vb->num_planes; i++) vb2_set_plane_payload(vb, i, 0); /* * Since the buffer hasn't been queued to the ready queue, * mark is active and owned before marking it LAST and DONE */ vb->state = VB2_BUF_STATE_ACTIVE; atomic_inc(&q->owned_by_drv_count); vbuf = to_vb2_v4l2_buffer(vb); vbuf->field = V4L2_FIELD_NONE; v4l2_m2m_last_buffer_done(m2m_ctx, vbuf); } int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) 
{ struct video_device *vdev = video_devdata(file); struct vb2_queue *vq; int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); if (V4L2_TYPE_IS_CAPTURE(vq->type) && (buf->flags & V4L2_BUF_FLAG_REQUEST_FD)) { dprintk("%s: requests cannot be used with capture buffers\n", __func__); return -EPERM; } ret = vb2_qbuf(vq, vdev->v4l2_dev->mdev, buf); if (ret) return ret; /* Adjust MMAP memory offsets for the CAPTURE queue */ v4l2_m2m_adjust_mem_offset(vq, buf); /* * If the capture queue is streaming, but streaming hasn't started * on the device, but was asked to stop, mark the previously queued * buffer as DONE with LAST flag since it won't be queued on the * device. */ if (V4L2_TYPE_IS_CAPTURE(vq->type) && vb2_is_streaming(vq) && !vb2_start_streaming_called(vq) && (v4l2_m2m_has_stopped(m2m_ctx) || v4l2_m2m_dst_buf_is_last(m2m_ctx))) v4l2_m2m_force_last_buf_done(m2m_ctx, vq); else if (!(buf->flags & V4L2_BUF_FLAG_IN_REQUEST)) v4l2_m2m_try_schedule(m2m_ctx); return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_qbuf); int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { struct vb2_queue *vq; int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); ret = vb2_dqbuf(vq, buf, file->f_flags & O_NONBLOCK); if (ret) return ret; /* Adjust MMAP memory offsets for the CAPTURE queue */ v4l2_m2m_adjust_mem_offset(vq, buf); return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf); int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_buffer *buf) { struct video_device *vdev = video_devdata(file); struct vb2_queue *vq; int ret; vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); ret = vb2_prepare_buf(vq, vdev->v4l2_dev->mdev, buf); if (ret) return ret; /* Adjust MMAP memory offsets for the CAPTURE queue */ v4l2_m2m_adjust_mem_offset(vq, buf); return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_prepare_buf); int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_create_buffers *create) { struct vb2_queue *vq; vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); return vb2_create_bufs(vq, create); } EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs); int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_exportbuffer *eb) { struct vb2_queue *vq; vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); return vb2_expbuf(vq, eb); } EXPORT_SYMBOL_GPL(v4l2_m2m_expbuf); int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type) { struct vb2_queue *vq; int ret; vq = v4l2_m2m_get_vq(m2m_ctx, type); ret = vb2_streamon(vq, type); if (!ret) v4l2_m2m_try_schedule(m2m_ctx); return ret; } EXPORT_SYMBOL_GPL(v4l2_m2m_streamon); int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, enum v4l2_buf_type type) { struct v4l2_m2m_dev *m2m_dev; struct v4l2_m2m_queue_ctx *q_ctx; unsigned long flags_job, flags; int ret; /* wait until the current context is dequeued from job_queue */ v4l2_m2m_cancel_job(m2m_ctx); q_ctx = get_queue_ctx(m2m_ctx, type); ret = vb2_streamoff(&q_ctx->q, type); if (ret) return ret; m2m_dev = m2m_ctx->m2m_dev; spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); /* We should not be scheduled anymore, since we're dropping a queue. */ if (m2m_ctx->job_flags & TRANS_QUEUED) list_del(&m2m_ctx->queue); m2m_ctx->job_flags = 0; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); /* Drop queue, since streamoff returns device to the same state as after * calling reqbufs. 
*/ INIT_LIST_HEAD(&q_ctx->rdy_queue); q_ctx->num_rdy = 0; spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); if (m2m_dev->curr_ctx == m2m_ctx) { m2m_dev->curr_ctx = NULL; wake_up(&m2m_ctx->finished); } spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_streamoff); static __poll_t v4l2_m2m_poll_for_data(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct poll_table_struct *wait) { struct vb2_queue *src_q, *dst_q; __poll_t rc = 0; unsigned long flags; src_q = v4l2_m2m_get_src_vq(m2m_ctx); dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); /* * There has to be at least one buffer queued on each queued_list, which * means either in driver already or waiting for driver to claim it * and start processing. */ if ((!vb2_is_streaming(src_q) || src_q->error || list_empty(&src_q->queued_list)) && (!vb2_is_streaming(dst_q) || dst_q->error || (list_empty(&dst_q->queued_list) && !dst_q->last_buffer_dequeued))) return EPOLLERR; spin_lock_irqsave(&src_q->done_lock, flags); if (!list_empty(&src_q->done_list)) rc |= EPOLLOUT | EPOLLWRNORM; spin_unlock_irqrestore(&src_q->done_lock, flags); spin_lock_irqsave(&dst_q->done_lock, flags); /* * If the last buffer was dequeued from the capture queue, signal * userspace. DQBUF(CAPTURE) will return -EPIPE. */ if (!list_empty(&dst_q->done_list) || dst_q->last_buffer_dequeued) rc |= EPOLLIN | EPOLLRDNORM; spin_unlock_irqrestore(&dst_q->done_lock, flags); return rc; } __poll_t v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct poll_table_struct *wait) { struct v4l2_fh *fh = file_to_v4l2_fh(file); struct vb2_queue *src_q = v4l2_m2m_get_src_vq(m2m_ctx); struct vb2_queue *dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); __poll_t req_events = poll_requested_events(wait); __poll_t rc = 0; /* * poll_wait() MUST be called on the first invocation on all the * potential queues of interest, even if we are not interested in their * events during this first call. Failure to do so will result in * queue's events to be ignored because the poll_table won't be capable * of adding new wait queues thereafter. 
*/ poll_wait(file, &src_q->done_wq, wait); poll_wait(file, &dst_q->done_wq, wait); if (req_events & (EPOLLOUT | EPOLLWRNORM | EPOLLIN | EPOLLRDNORM)) rc = v4l2_m2m_poll_for_data(file, m2m_ctx, wait); poll_wait(file, &fh->wait, wait); if (v4l2_event_pending(fh)) rc |= EPOLLPRI; return rc; } EXPORT_SYMBOL_GPL(v4l2_m2m_poll); int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct vm_area_struct *vma) { unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; struct vb2_queue *vq; if (offset < DST_QUEUE_OFF_BASE) { vq = v4l2_m2m_get_src_vq(m2m_ctx); } else { vq = v4l2_m2m_get_dst_vq(m2m_ctx); vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); } return vb2_mmap(vq, vma); } EXPORT_SYMBOL(v4l2_m2m_mmap); #ifndef CONFIG_MMU unsigned long v4l2_m2m_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct v4l2_fh *fh = file_to_v4l2_fh(file); unsigned long offset = pgoff << PAGE_SHIFT; struct vb2_queue *vq; if (offset < DST_QUEUE_OFF_BASE) { vq = v4l2_m2m_get_src_vq(fh->m2m_ctx); } else { vq = v4l2_m2m_get_dst_vq(fh->m2m_ctx); pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); } return vb2_get_unmapped_area(vq, addr, len, pgoff, flags); } EXPORT_SYMBOL_GPL(v4l2_m2m_get_unmapped_area); #endif #if defined(CONFIG_MEDIA_CONTROLLER) void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev) { media_remove_intf_links(&m2m_dev->intf_devnode->intf); media_devnode_remove(m2m_dev->intf_devnode); media_entity_remove_links(m2m_dev->source); media_entity_remove_links(&m2m_dev->sink); media_entity_remove_links(&m2m_dev->proc); media_device_unregister_entity(m2m_dev->source); media_device_unregister_entity(&m2m_dev->sink); media_device_unregister_entity(&m2m_dev->proc); kfree(m2m_dev->source->name); kfree(m2m_dev->sink.name); kfree(m2m_dev->proc.name); } EXPORT_SYMBOL_GPL(v4l2_m2m_unregister_media_controller); static int v4l2_m2m_register_entity(struct media_device *mdev, struct v4l2_m2m_dev *m2m_dev, enum v4l2_m2m_entity_type type, struct video_device *vdev, int function) { struct media_entity *entity; struct media_pad *pads; char *name; unsigned int len; int num_pads; int ret; switch (type) { case MEM2MEM_ENT_TYPE_SOURCE: entity = m2m_dev->source; pads = &m2m_dev->source_pad; pads[0].flags = MEDIA_PAD_FL_SOURCE; num_pads = 1; break; case MEM2MEM_ENT_TYPE_SINK: entity = &m2m_dev->sink; pads = &m2m_dev->sink_pad; pads[0].flags = MEDIA_PAD_FL_SINK; num_pads = 1; break; case MEM2MEM_ENT_TYPE_PROC: entity = &m2m_dev->proc; pads = m2m_dev->proc_pads; pads[0].flags = MEDIA_PAD_FL_SINK; pads[1].flags = MEDIA_PAD_FL_SOURCE; num_pads = 2; break; default: return -EINVAL; } entity->obj_type = MEDIA_ENTITY_TYPE_BASE; if (type != MEM2MEM_ENT_TYPE_PROC) { entity->info.dev.major = VIDEO_MAJOR; entity->info.dev.minor = vdev->minor; } len = strlen(vdev->name) + 2 + strlen(m2m_entity_name[type]); name = kmalloc(len, GFP_KERNEL); if (!name) return -ENOMEM; snprintf(name, len, "%s-%s", vdev->name, m2m_entity_name[type]); entity->name = name; entity->function = function; ret = media_entity_pads_init(entity, num_pads, pads); if (ret) { kfree(entity->name); entity->name = NULL; return ret; } ret = media_device_register_entity(mdev, entity); if (ret) { kfree(entity->name); entity->name = NULL; return ret; } return 0; } int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev, struct video_device *vdev, int function) { struct media_device *mdev = vdev->v4l2_dev->mdev; struct media_link *link; int ret; if (!mdev) return 0; /* A 
memory-to-memory device consists in two * DMA engine and one video processing entities. * The DMA engine entities are linked to a V4L interface */ /* Create the three entities with their pads */ m2m_dev->source = &vdev->entity; ret = v4l2_m2m_register_entity(mdev, m2m_dev, MEM2MEM_ENT_TYPE_SOURCE, vdev, MEDIA_ENT_F_IO_V4L); if (ret) return ret; ret = v4l2_m2m_register_entity(mdev, m2m_dev, MEM2MEM_ENT_TYPE_PROC, vdev, function); if (ret) goto err_rel_entity0; ret = v4l2_m2m_register_entity(mdev, m2m_dev, MEM2MEM_ENT_TYPE_SINK, vdev, MEDIA_ENT_F_IO_V4L); if (ret) goto err_rel_entity1; /* Connect the three entities */ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) goto err_rel_entity2; ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (ret) goto err_rm_links0; /* Create video interface */ m2m_dev->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO, 0, VIDEO_MAJOR, vdev->minor); if (!m2m_dev->intf_devnode) { ret = -ENOMEM; goto err_rm_links1; } /* Connect the two DMA engines to the interface */ link = media_create_intf_link(m2m_dev->source, &m2m_dev->intf_devnode->intf, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (!link) { ret = -ENOMEM; goto err_rm_devnode; } link = media_create_intf_link(&m2m_dev->sink, &m2m_dev->intf_devnode->intf, MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED); if (!link) { ret = -ENOMEM; goto err_rm_intf_link; } return 0; err_rm_intf_link: media_remove_intf_links(&m2m_dev->intf_devnode->intf); err_rm_devnode: media_devnode_remove(m2m_dev->intf_devnode); err_rm_links1: media_entity_remove_links(&m2m_dev->sink); err_rm_links0: media_entity_remove_links(&m2m_dev->proc); media_entity_remove_links(m2m_dev->source); err_rel_entity2: media_device_unregister_entity(&m2m_dev->proc); kfree(m2m_dev->proc.name); err_rel_entity1: media_device_unregister_entity(&m2m_dev->sink); kfree(m2m_dev->sink.name); err_rel_entity0: media_device_unregister_entity(m2m_dev->source); kfree(m2m_dev->source->name); return ret; return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_register_media_controller); #endif struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops) { struct v4l2_m2m_dev *m2m_dev; if (!m2m_ops || WARN_ON(!m2m_ops->device_run)) return ERR_PTR(-EINVAL); m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL); if (!m2m_dev) return ERR_PTR(-ENOMEM); m2m_dev->curr_ctx = NULL; m2m_dev->m2m_ops = m2m_ops; INIT_LIST_HEAD(&m2m_dev->job_queue); spin_lock_init(&m2m_dev->job_spinlock); INIT_WORK(&m2m_dev->job_work, v4l2_m2m_device_run_work); return m2m_dev; } EXPORT_SYMBOL_GPL(v4l2_m2m_init); void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) { kfree(m2m_dev); } EXPORT_SYMBOL_GPL(v4l2_m2m_release); struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, void *drv_priv, int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)) { struct v4l2_m2m_ctx *m2m_ctx; struct v4l2_m2m_queue_ctx *out_q_ctx, *cap_q_ctx; int ret; m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); if (!m2m_ctx) return ERR_PTR(-ENOMEM); m2m_ctx->priv = drv_priv; m2m_ctx->m2m_dev = m2m_dev; init_waitqueue_head(&m2m_ctx->finished); out_q_ctx = &m2m_ctx->out_q_ctx; cap_q_ctx = &m2m_ctx->cap_q_ctx; INIT_LIST_HEAD(&out_q_ctx->rdy_queue); INIT_LIST_HEAD(&cap_q_ctx->rdy_queue); spin_lock_init(&out_q_ctx->rdy_spinlock); spin_lock_init(&cap_q_ctx->rdy_spinlock); INIT_LIST_HEAD(&m2m_ctx->queue); ret = queue_init(drv_priv, &out_q_ctx->q, 
&cap_q_ctx->q); if (ret) goto err; /* * Both queues should use same the mutex to lock the m2m context. * This lock is used in some v4l2_m2m_* helpers. */ if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) { ret = -EINVAL; goto err; } m2m_ctx->q_lock = out_q_ctx->q.lock; return m2m_ctx; err: kfree(m2m_ctx); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init); void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) { /* wait until the current context is dequeued from job_queue */ v4l2_m2m_cancel_job(m2m_ctx); vb2_queue_release(&m2m_ctx->cap_q_ctx.q); vb2_queue_release(&m2m_ctx->out_q_ctx.q); kfree(m2m_ctx); } EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_release); void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_v4l2_buffer *vbuf) { struct v4l2_m2m_buffer *b = container_of(vbuf, struct v4l2_m2m_buffer, vb); struct v4l2_m2m_queue_ctx *q_ctx; unsigned long flags; q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type); if (!q_ctx) return; spin_lock_irqsave(&q_ctx->rdy_spinlock, flags); list_add_tail(&b->list, &q_ctx->rdy_queue); q_ctx->num_rdy++; spin_unlock_irqrestore(&q_ctx->rdy_spinlock, flags); } EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); void v4l2_m2m_buf_copy_metadata(const struct vb2_v4l2_buffer *out_vb, struct vb2_v4l2_buffer *cap_vb, bool copy_frame_flags) { u32 mask = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK; if (copy_frame_flags) mask |= V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME; cap_vb->vb2_buf.timestamp = out_vb->vb2_buf.timestamp; if (out_vb->flags & V4L2_BUF_FLAG_TIMECODE) cap_vb->timecode = out_vb->timecode; cap_vb->field = out_vb->field; cap_vb->flags &= ~mask; cap_vb->flags |= out_vb->flags & mask; cap_vb->vb2_buf.copied_timestamp = 1; } EXPORT_SYMBOL_GPL(v4l2_m2m_buf_copy_metadata); void v4l2_m2m_request_queue(struct media_request *req) { struct media_request_object *obj, *obj_safe; struct v4l2_m2m_ctx *m2m_ctx = NULL; /* * Queue all objects. Note that buffer objects are at the end of the * objects list, after all other object types. Once buffer objects * are queued, the driver might delete them immediately (if the driver * processes the buffer at once), so we have to use * list_for_each_entry_safe() to handle the case where the object we * queue is deleted. */ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { struct v4l2_m2m_ctx *m2m_ctx_obj; struct vb2_buffer *vb; if (!obj->ops->queue) continue; if (vb2_request_object_is_buffer(obj)) { /* Sanity checks */ vb = container_of(obj, struct vb2_buffer, req_obj); WARN_ON(!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)); m2m_ctx_obj = container_of(vb->vb2_queue, struct v4l2_m2m_ctx, out_q_ctx.q); WARN_ON(m2m_ctx && m2m_ctx_obj != m2m_ctx); m2m_ctx = m2m_ctx_obj; } /* * The buffer we queue here can in theory be immediately * unbound, hence the use of list_for_each_entry_safe() * above and why we call the queue op last. 
*/ obj->ops->queue(obj); } WARN_ON(!m2m_ctx); if (m2m_ctx) v4l2_m2m_try_schedule(m2m_ctx); } EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue); /* Videobuf2 ioctl helpers */ int v4l2_m2m_ioctl_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *rb) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_reqbufs); int v4l2_m2m_ioctl_create_bufs(struct file *file, void *priv, struct v4l2_create_buffers *create) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_create_bufs); int v4l2_m2m_ioctl_remove_bufs(struct file *file, void *priv, struct v4l2_remove_buffers *remove) { struct v4l2_fh *fh = file_to_v4l2_fh(file); struct vb2_queue *q = v4l2_m2m_get_vq(fh->m2m_ctx, remove->type); if (!q) return -EINVAL; if (q->type != remove->type) return -EINVAL; return vb2_core_remove_bufs(q, remove->index, remove->count); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_remove_bufs); int v4l2_m2m_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_querybuf); int v4l2_m2m_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_qbuf); int v4l2_m2m_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_dqbuf); int v4l2_m2m_ioctl_prepare_buf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_prepare_buf); int v4l2_m2m_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *eb) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_expbuf); int v4l2_m2m_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_streamon(file, fh->m2m_ctx, type); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamon); int v4l2_m2m_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_streamoff); int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *priv, struct v4l2_encoder_cmd *ec) { if (ec->cmd != V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) return -EINVAL; ec->flags = 0; return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_encoder_cmd); int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *dc) { if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) return -EINVAL; dc->flags = 0; if (dc->cmd == V4L2_DEC_CMD_STOP) { dc->stop.pts = 0; } else if (dc->cmd == V4L2_DEC_CMD_START) { dc->start.speed = 0; dc->start.format = V4L2_DEC_START_FMT_NONE; } return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_try_decoder_cmd); /* * Updates the encoding state on ENC_CMD_STOP/ENC_CMD_START * Should be called from the encoder driver encoder_cmd() callback */ int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_encoder_cmd *ec) { if (ec->cmd != 
V4L2_ENC_CMD_STOP && ec->cmd != V4L2_ENC_CMD_START) return -EINVAL; if (ec->cmd == V4L2_ENC_CMD_STOP) return v4l2_update_last_buf_state(m2m_ctx); if (m2m_ctx->is_draining) return -EBUSY; if (m2m_ctx->has_stopped) m2m_ctx->has_stopped = false; return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_encoder_cmd); /* * Updates the decoding state on DEC_CMD_STOP/DEC_CMD_START * Should be called from the decoder driver decoder_cmd() callback */ int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct v4l2_decoder_cmd *dc) { if (dc->cmd != V4L2_DEC_CMD_STOP && dc->cmd != V4L2_DEC_CMD_START) return -EINVAL; if (dc->cmd == V4L2_DEC_CMD_STOP) return v4l2_update_last_buf_state(m2m_ctx); if (m2m_ctx->is_draining) return -EBUSY; if (m2m_ctx->has_stopped) m2m_ctx->has_stopped = false; return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_decoder_cmd); int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *priv, struct v4l2_encoder_cmd *ec) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_encoder_cmd(file, fh->m2m_ctx, ec); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_encoder_cmd); int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *dc) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_decoder_cmd(file, fh->m2m_ctx, dc); } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_decoder_cmd); int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *dc) { if (dc->cmd != V4L2_DEC_CMD_FLUSH) return -EINVAL; dc->flags = 0; return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_try_decoder_cmd); int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv, struct v4l2_decoder_cmd *dc) { struct v4l2_fh *fh = file_to_v4l2_fh(file); struct vb2_v4l2_buffer *out_vb, *cap_vb; struct v4l2_m2m_dev *m2m_dev = fh->m2m_ctx->m2m_dev; unsigned long flags; int ret; ret = v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv, dc); if (ret < 0) return ret; spin_lock_irqsave(&m2m_dev->job_spinlock, flags); out_vb = v4l2_m2m_last_src_buf(fh->m2m_ctx); cap_vb = v4l2_m2m_last_dst_buf(fh->m2m_ctx); /* * If there is an out buffer pending, then clear any HOLD flag. * * By clearing this flag we ensure that when this output * buffer is processed any held capture buffer will be released. */ if (out_vb) { out_vb->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; } else if (cap_vb && cap_vb->is_held) { /* * If there were no output buffers, but there is a * capture buffer that is held, then release that * buffer. */ cap_vb->is_held = false; v4l2_m2m_dst_buf_remove(fh->m2m_ctx); v4l2_m2m_buf_done(cap_vb, VB2_BUF_STATE_DONE); } spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); return 0; } EXPORT_SYMBOL_GPL(v4l2_m2m_ioctl_stateless_decoder_cmd); /* * v4l2_file_operations helpers. It is assumed here same lock is used * for the output and the capture buffer queue. */ int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma) { struct v4l2_fh *fh = file_to_v4l2_fh(file); return v4l2_m2m_mmap(file, fh->m2m_ctx, vma); } EXPORT_SYMBOL_GPL(v4l2_m2m_fop_mmap); __poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait) { struct v4l2_fh *fh = file_to_v4l2_fh(file); struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; __poll_t ret; if (m2m_ctx->q_lock) mutex_lock(m2m_ctx->q_lock); ret = v4l2_m2m_poll(file, m2m_ctx, wait); if (m2m_ctx->q_lock) mutex_unlock(m2m_ctx->q_lock); return ret; } EXPORT_SYMBOL_GPL(v4l2_m2m_fop_poll); |
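The reqbufs/qbuf/streamon/poll helpers above exist to back the ordinary user-space mem2mem sequence. The following is a minimal illustrative user-space sketch (not part of the file above; "/dev/video0" is a placeholder node and error handling is abbreviated): each queue is set up with REQBUFS, QUERYBUF, mmap(), QBUF and STREAMON, then the client polls and dequeues the result. Note that for the CAPTURE queue the driver has already folded DST_QUEUE_OFF_BASE into the reported offset, so user space passes buf.m.offset to mmap() unchanged.

#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/videodev2.h>

static int setup_queue(int fd, enum v4l2_buf_type type)
{
	struct v4l2_requestbuffers req = {
		.count = 1, .type = type, .memory = V4L2_MEMORY_MMAP,
	};
	struct v4l2_buffer buf = {
		.index = 0, .type = type, .memory = V4L2_MEMORY_MMAP,
	};

	if (ioctl(fd, VIDIOC_REQBUFS, &req) < 0 ||
	    ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return -1;
	/* The driver already adjusted the CAPTURE offset by DST_QUEUE_OFF_BASE. */
	if (mmap(NULL, buf.length, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fd, buf.m.offset) == MAP_FAILED)
		return -1;
	if (ioctl(fd, VIDIOC_QBUF, &buf) < 0 ||
	    ioctl(fd, VIDIOC_STREAMON, &type) < 0)
		return -1;
	return 0;
}

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* placeholder mem2mem node */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	struct v4l2_buffer done = {
		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
		.memory = V4L2_MEMORY_MMAP,
	};

	if (fd < 0)
		return 1;
	/* A job is only scheduled once both queues stream and hold buffers. */
	if (setup_queue(fd, V4L2_BUF_TYPE_VIDEO_OUTPUT) < 0 ||
	    setup_queue(fd, V4L2_BUF_TYPE_VIDEO_CAPTURE) < 0)
		return 1;
	if (poll(&pfd, 1, 5000) <= 0)
		return 1;
	return ioctl(fd, VIDIOC_DQBUF, &done) < 0 ? 1 : 0;
}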
// SPDX-License-Identifier: GPL-2.0-only /* * PS/2 driver library * * Copyright (c) 1999-2002 Vojtech Pavlik * Copyright (c) 2004 Dmitry Torokhov */ #include <linux/delay.h> #include <linux/export.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/input.h> #include <linux/kmsan-checks.h> #include <linux/serio.h> #include <linux/i8042.h> #include <linux/libps2.h> #define DRIVER_DESC "PS/2 driver library" #define PS2_CMD_SETSCALE11 0x00e6 #define PS2_CMD_SETRES 0x10e8 #define PS2_CMD_EX_SETLEDS 0x20eb #define PS2_CMD_SETLEDS 0x10ed #define PS2_CMD_GETID 0x02f2 #define PS2_CMD_SETREP 0x10f3 /* Set repeat rate/set report rate */ #define PS2_CMD_RESET_BAT 0x02ff #define PS2_RET_BAT 0xaa #define PS2_RET_ID 0x00 #define PS2_RET_ACK 0xfa #define PS2_RET_NAK 0xfe #define PS2_RET_ERR 0xfc #define PS2_FLAG_ACK BIT(0) /* Waiting for ACK/NAK */ #define PS2_FLAG_CMD BIT(1) /* Waiting for a command to finish */ #define PS2_FLAG_CMD1 BIT(2) /* Waiting for the first byte of command response */ #define PS2_FLAG_WAITID BIT(3) /* Command executing is GET ID */ #define PS2_FLAG_NAK
BIT(4) /* Last transmission was NAKed */ #define PS2_FLAG_PASS_NOACK BIT(5) /* Pass non-ACK byte to receive handler */ static int ps2_do_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout, unsigned int max_attempts) __releases(&ps2dev->serio->lock) __acquires(&ps2dev->serio->lock) { int attempt = 0; int error; lockdep_assert_held(&ps2dev->serio->lock); do { ps2dev->nak = 1; ps2dev->flags |= PS2_FLAG_ACK; serio_continue_rx(ps2dev->serio); error = serio_write(ps2dev->serio, byte); if (error) dev_dbg(&ps2dev->serio->dev, "failed to write %#02x: %d\n", byte, error); else wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_ACK), msecs_to_jiffies(timeout)); serio_pause_rx(ps2dev->serio); } while (ps2dev->nak == PS2_RET_NAK && ++attempt < max_attempts); ps2dev->flags &= ~PS2_FLAG_ACK; if (!error) { switch (ps2dev->nak) { case 0: break; case PS2_RET_NAK: error = -EAGAIN; break; case PS2_RET_ERR: error = -EPROTO; break; default: error = -EIO; break; } } if (error || attempt > 1) dev_dbg(&ps2dev->serio->dev, "%02x - %d (%x), attempt %d\n", byte, error, ps2dev->nak, attempt); return error; } /** * ps2_sendbyte - sends a byte to the device and wait for acknowledgement * @ps2dev: a PS/2 device to send the data to * @byte: data to be sent to the device * @timeout: timeout for sending the data and receiving an acknowledge * * The function doesn't handle retransmission, the caller is expected to handle * it when needed. * * ps2_sendbyte() can only be called from a process context. */ int ps2_sendbyte(struct ps2dev *ps2dev, u8 byte, unsigned int timeout) { int retval; guard(serio_pause_rx)(ps2dev->serio); retval = ps2_do_sendbyte(ps2dev, byte, timeout, 1); dev_dbg(&ps2dev->serio->dev, "%02x - %x\n", byte, ps2dev->nak); return retval; } EXPORT_SYMBOL(ps2_sendbyte); /** * ps2_begin_command - mark beginning of execution of a complex command * @ps2dev: a PS/2 device executing the command * * Serializes a complex/compound command. Once command is finished * ps2_end_command() should be called. 
*/ void ps2_begin_command(struct ps2dev *ps2dev) { struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; mutex_lock(m); } EXPORT_SYMBOL(ps2_begin_command); /** * ps2_end_command - mark end of execution of a complex command * @ps2dev: a PS/2 device executing the command */ void ps2_end_command(struct ps2dev *ps2dev) { struct mutex *m = ps2dev->serio->ps2_cmd_mutex ?: &ps2dev->cmd_mutex; mutex_unlock(m); } EXPORT_SYMBOL(ps2_end_command); /** * ps2_drain - waits for device to transmit requested number of bytes * and discards them * @ps2dev: the PS/2 device that should be drained * @maxbytes: maximum number of bytes to be drained * @timeout: time to drain the device */ void ps2_drain(struct ps2dev *ps2dev, size_t maxbytes, unsigned int timeout) { if (maxbytes > sizeof(ps2dev->cmdbuf)) { WARN_ON(1); maxbytes = sizeof(ps2dev->cmdbuf); } ps2_begin_command(ps2dev); scoped_guard(serio_pause_rx, ps2dev->serio) { ps2dev->flags = PS2_FLAG_CMD; ps2dev->cmdcnt = maxbytes; } wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD), msecs_to_jiffies(timeout)); ps2_end_command(ps2dev); } EXPORT_SYMBOL(ps2_drain); /** * ps2_is_keyboard_id - checks received ID byte against the list of * known keyboard IDs * @id_byte: data byte that should be checked */ bool ps2_is_keyboard_id(u8 id_byte) { static const u8 keyboard_ids[] = { 0xab, /* Regular keyboards */ 0xac, /* NCD Sun keyboard */ 0x2b, /* Trust keyboard, translated */ 0x5d, /* Trust keyboard */ 0x60, /* NMB SGI keyboard, translated */ 0x47, /* NMB SGI keyboard */ }; return memchr(keyboard_ids, id_byte, sizeof(keyboard_ids)) != NULL; } EXPORT_SYMBOL(ps2_is_keyboard_id); /* * ps2_adjust_timeout() is called after receiving 1st byte of command * response and tries to reduce remaining timeout to speed up command * completion. */ static int ps2_adjust_timeout(struct ps2dev *ps2dev, unsigned int command, unsigned int timeout) { switch (command) { case PS2_CMD_RESET_BAT: /* * Device has sent the first response byte after * reset command, reset is thus done, so we can * shorten the timeout. * The next byte will come soon (keyboard) or not * at all (mouse). */ if (timeout > msecs_to_jiffies(100)) timeout = msecs_to_jiffies(100); break; case PS2_CMD_GETID: /* * Microsoft Natural Elite keyboard responds to * the GET ID command as it were a mouse, with * a single byte. Fail the command so atkbd will * use alternative probe to detect it. */ if (ps2dev->cmdbuf[1] == 0xaa) { scoped_guard(serio_pause_rx, ps2dev->serio) ps2dev->flags = 0; timeout = 0; } /* * If device behind the port is not a keyboard there * won't be 2nd byte of ID response. */ if (!ps2_is_keyboard_id(ps2dev->cmdbuf[1])) { scoped_guard(serio_pause_rx, ps2dev->serio) ps2dev->flags = ps2dev->cmdcnt = 0; timeout = 0; } break; default: break; } return timeout; } /** * __ps2_command - send a command to PS/2 device * @ps2dev: the PS/2 device that should execute the command * @param: a buffer containing parameters to be sent along with the command, * or place where the results of the command execution will be deposited, * or both * @command: command word that encodes the command itself, as well as number of * additional parameter bytes that should be sent to the device and expected * length of the command response * * Not serialized. Callers should use ps2_begin_command() and ps2_end_command() * to ensure proper serialization for complex commands. 
*/ int __ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command) { unsigned int timeout; unsigned int send = (command >> 12) & 0xf; unsigned int receive = (command >> 8) & 0xf; int rc; int i; u8 send_param[16]; if (receive > sizeof(ps2dev->cmdbuf)) { WARN_ON(1); return -EINVAL; } if (send && !param) { WARN_ON(1); return -EINVAL; } memcpy(send_param, param, send); /* * Not using guard notation because we need to break critical * section below while waiting for the response. */ serio_pause_rx(ps2dev->serio); ps2dev->cmdcnt = receive; switch (command) { case PS2_CMD_GETID: /* * Some mice do not ACK the "get ID" command, prepare to * handle this. */ ps2dev->flags = PS2_FLAG_WAITID; break; case PS2_CMD_SETLEDS: case PS2_CMD_EX_SETLEDS: case PS2_CMD_SETREP: ps2dev->flags = PS2_FLAG_PASS_NOACK; break; default: ps2dev->flags = 0; break; } if (receive) { /* Indicate that we expect response to the command. */ ps2dev->flags |= PS2_FLAG_CMD | PS2_FLAG_CMD1; if (param) for (i = 0; i < receive; i++) ps2dev->cmdbuf[(receive - 1) - i] = param[i]; } /* * Some devices (Synaptics) perform the reset before * ACKing the reset command, and so it can take a long * time before the ACK arrives. */ timeout = command == PS2_CMD_RESET_BAT ? 1000 : 200; rc = ps2_do_sendbyte(ps2dev, command & 0xff, timeout, 2); if (rc) goto out_reset_flags; /* Send command parameters, if any. */ for (i = 0; i < send; i++) { rc = ps2_do_sendbyte(ps2dev, param[i], 200, 2); if (rc) goto out_reset_flags; } serio_continue_rx(ps2dev->serio); /* * The reset command takes a long time to execute. */ timeout = msecs_to_jiffies(command == PS2_CMD_RESET_BAT ? 4000 : 500); timeout = wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD1), timeout); if (ps2dev->cmdcnt && !(ps2dev->flags & PS2_FLAG_CMD1)) { timeout = ps2_adjust_timeout(ps2dev, command, timeout); wait_event_timeout(ps2dev->wait, !(ps2dev->flags & PS2_FLAG_CMD), timeout); } serio_pause_rx(ps2dev->serio); if (param) { for (i = 0; i < receive; i++) param[i] = ps2dev->cmdbuf[(receive - 1) - i]; kmsan_unpoison_memory(param, receive); } if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1)) { rc = -EPROTO; goto out_reset_flags; } rc = 0; out_reset_flags: ps2dev->flags = 0; serio_continue_rx(ps2dev->serio); dev_dbg(&ps2dev->serio->dev, "%02x [%*ph] - %x/%08lx [%*ph]\n", command & 0xff, send, send_param, ps2dev->nak, ps2dev->flags, receive, param ?: send_param); /* * ps_command() handles resends itself, so do not leak -EAGAIN * to the callers. */ return rc != -EAGAIN ? rc : -EPROTO; } EXPORT_SYMBOL(__ps2_command); /** * ps2_command - send a command to PS/2 device * @ps2dev: the PS/2 device that should execute the command * @param: a buffer containing parameters to be sent along with the command, * or place where the results of the command execution will be deposited, * or both * @command: command word that encodes the command itself, as well as number of * additional parameter bytes that should be sent to the device and expected * length of the command response * * Note: ps2_command() serializes the command execution so that only one * command can be executed at a time for either individual port or the entire * 8042 controller. 
*/ int ps2_command(struct ps2dev *ps2dev, u8 *param, unsigned int command) { int rc; ps2_begin_command(ps2dev); rc = __ps2_command(ps2dev, param, command); ps2_end_command(ps2dev); return rc; } EXPORT_SYMBOL(ps2_command); /** * ps2_sliced_command - sends an extended PS/2 command to a mouse * @ps2dev: the PS/2 device that should execute the command * @command: command byte * * The command is sent using "sliced" syntax understood by advanced devices, * such as Logitech or Synaptics touchpads. The command is encoded as: * 0xE6 0xE8 rr 0xE8 ss 0xE8 tt 0xE8 uu where (rr*64)+(ss*16)+(tt*4)+uu * is the command. */ int ps2_sliced_command(struct ps2dev *ps2dev, u8 command) { int i; int retval; ps2_begin_command(ps2dev); retval = __ps2_command(ps2dev, NULL, PS2_CMD_SETSCALE11); if (retval) goto out; for (i = 6; i >= 0; i -= 2) { u8 d = (command >> i) & 3; retval = __ps2_command(ps2dev, &d, PS2_CMD_SETRES); if (retval) break; } out: dev_dbg(&ps2dev->serio->dev, "%02x - %d\n", command, retval); ps2_end_command(ps2dev); return retval; } EXPORT_SYMBOL(ps2_sliced_command); /** * ps2_init - initializes ps2dev structure * @ps2dev: structure to be initialized * @serio: serio port associated with the PS/2 device * @pre_receive_handler: validation handler to check basic communication state * @receive_handler: main protocol handler * * Prepares ps2dev structure for use in drivers for PS/2 devices. */ void ps2_init(struct ps2dev *ps2dev, struct serio *serio, ps2_pre_receive_handler_t pre_receive_handler, ps2_receive_handler_t receive_handler) { ps2dev->pre_receive_handler = pre_receive_handler; ps2dev->receive_handler = receive_handler; mutex_init(&ps2dev->cmd_mutex); lockdep_set_subclass(&ps2dev->cmd_mutex, serio->depth); init_waitqueue_head(&ps2dev->wait); ps2dev->serio = serio; serio_set_drvdata(serio, ps2dev); } EXPORT_SYMBOL(ps2_init); /* * ps2_handle_response() stores device's response to a command and notifies * the process waiting for completion of the command. Note that there is a * distinction between waiting for the first byte of the response, and * waiting for subsequent bytes. It is done so that callers could shorten * timeouts once first byte of response is received. */ static void ps2_handle_response(struct ps2dev *ps2dev, u8 data) { if (ps2dev->cmdcnt) ps2dev->cmdbuf[--ps2dev->cmdcnt] = data; if (ps2dev->flags & PS2_FLAG_CMD1) { ps2dev->flags &= ~PS2_FLAG_CMD1; if (ps2dev->cmdcnt) wake_up(&ps2dev->wait); } if (!ps2dev->cmdcnt) { ps2dev->flags &= ~PS2_FLAG_CMD; wake_up(&ps2dev->wait); } } /* * ps2_handle_ack() processes ACK/NAK of a command from a PS/2 device, * possibly applying workarounds for mice not acknowledging the "get ID" * command. */ static void ps2_handle_ack(struct ps2dev *ps2dev, u8 data) { switch (data) { case PS2_RET_ACK: ps2dev->nak = 0; break; case PS2_RET_NAK: ps2dev->flags |= PS2_FLAG_NAK; ps2dev->nak = PS2_RET_NAK; break; case PS2_RET_ERR: if (ps2dev->flags & PS2_FLAG_NAK) { ps2dev->flags &= ~PS2_FLAG_NAK; ps2dev->nak = PS2_RET_ERR; break; } fallthrough; /* * Workaround for mice which don't ACK the Get ID command. * These are valid mouse IDs that we recognize. */ case 0x00: case 0x03: case 0x04: if (ps2dev->flags & PS2_FLAG_WAITID) { ps2dev->nak = 0; break; } fallthrough; default: /* * Do not signal errors if we get unexpected reply while * waiting for an ACK to the initial (first) command byte: * the device might not be quiesced yet and continue * delivering data. 
For certain commands (such as set leds and * set repeat rate) that can be used during normal device * operation, we even pass this data byte to the normal receive * handler. * Note that we reset PS2_FLAG_WAITID flag, so the workaround * for mice not acknowledging the Get ID command only triggers * on the 1st byte; if device spews data we really want to see * a real ACK from it. */ dev_dbg(&ps2dev->serio->dev, "unexpected %#02x\n", data); if (ps2dev->flags & PS2_FLAG_PASS_NOACK) ps2dev->receive_handler(ps2dev, data); ps2dev->flags &= ~(PS2_FLAG_WAITID | PS2_FLAG_PASS_NOACK); return; } if (!ps2dev->nak) ps2dev->flags &= ~PS2_FLAG_NAK; ps2dev->flags &= ~PS2_FLAG_ACK; if (!ps2dev->nak && data != PS2_RET_ACK) ps2_handle_response(ps2dev, data); else wake_up(&ps2dev->wait); } /* * Clears state of PS/2 device after communication error by resetting majority * of flags and waking up waiters, if any. */ static void ps2_cleanup(struct ps2dev *ps2dev) { unsigned long old_flags = ps2dev->flags; /* reset all flags except last nak */ ps2dev->flags &= PS2_FLAG_NAK; if (old_flags & PS2_FLAG_ACK) ps2dev->nak = 1; if (old_flags & (PS2_FLAG_ACK | PS2_FLAG_CMD)) wake_up(&ps2dev->wait); } /** * ps2_interrupt - common interrupt handler for PS/2 devices * @serio: serio port for the device * @data: a data byte received from the device * @flags: flags such as %SERIO_PARITY or %SERIO_TIMEOUT indicating state of * the data transfer * * ps2_interrupt() invokes pre-receive handler, optionally handles command * acknowledgement and response from the device, and finally passes the data * to the main protocol handler for future processing. */ irqreturn_t ps2_interrupt(struct serio *serio, u8 data, unsigned int flags) { struct ps2dev *ps2dev = serio_get_drvdata(serio); enum ps2_disposition rc; rc = ps2dev->pre_receive_handler(ps2dev, data, flags); switch (rc) { case PS2_ERROR: ps2_cleanup(ps2dev); break; case PS2_IGNORE: break; case PS2_PROCESS: if (ps2dev->flags & PS2_FLAG_ACK) ps2_handle_ack(ps2dev, data); else if (ps2dev->flags & PS2_FLAG_CMD) ps2_handle_response(ps2dev, data); else ps2dev->receive_handler(ps2dev, data); break; } return IRQ_HANDLED; } EXPORT_SYMBOL(ps2_interrupt); MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); MODULE_DESCRIPTION("PS/2 driver library"); MODULE_LICENSE("GPL"); |
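/*
 * A minimal usage sketch (not part of libps2.c) of the API above: a
 * hypothetical serio child driver initialises its ps2dev and issues a
 * "get ID" query.  All example_* names are invented for illustration;
 * serio port setup (serio_open(), wiring ps2_interrupt() up as the
 * serio ->interrupt callback) is omitted.  Per __ps2_command() above,
 * the command word packs the number of parameter bytes in bits 15:12,
 * the number of response bytes in bits 11:8 and the command byte in
 * the low 8 bits.
 */
#include <linux/errno.h>
#include <linux/libps2.h>
#include <linux/serio.h>

static enum ps2_disposition example_pre_receive(struct ps2dev *ps2dev,
						u8 data, unsigned int flags)
{
	/* Treat bytes that arrived with parity/timeout errors as an error. */
	return (flags & (SERIO_PARITY | SERIO_TIMEOUT)) ? PS2_ERROR : PS2_PROCESS;
}

static void example_receive(struct ps2dev *ps2dev, u8 data)
{
	/* Ordinary protocol bytes (motion packets, scancodes, ...) land here. */
}

static int example_query_id(struct ps2dev *ps2dev, struct serio *serio)
{
	u8 param[2] = { 0, 0 };
	int error;

	ps2_init(ps2dev, serio, example_pre_receive, example_receive);

	/* ps2_command() serializes the exchange via the command mutex. */
	error = ps2_command(ps2dev, param, PS2_CMD_GETID);
	if (error)
		return error;

	return ps2_is_keyboard_id(param[0]) ? 0 : -ENODEV;
}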
906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 | /* * Copyright (c) 2002 Red Hat, Inc. All rights reserved. * * This software may be freely redistributed under the terms of the * GNU General Public License. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Authors: David Woodhouse <dwmw2@infradead.org> * David Howells <dhowells@redhat.com> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/pagemap.h> #include <linux/sched.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/iversion.h> #include "internal.h" #include "afs_fs.h" void afs_init_new_symlink(struct afs_vnode *vnode, struct afs_operation *op) { size_t size = strlen(op->create.symlink) + 1; size_t dsize = 0; char *p; if (netfs_alloc_folioq_buffer(NULL, &vnode->directory, &dsize, size, mapping_gfp_mask(vnode->netfs.inode.i_mapping)) < 0) return; vnode->directory_size = dsize; p = kmap_local_folio(folioq_folio(vnode->directory, 0), 0); memcpy(p, op->create.symlink, size); kunmap_local(p); set_bit(AFS_VNODE_DIR_READ, &vnode->flags); netfs_single_mark_inode_dirty(&vnode->netfs.inode); } static void afs_put_link(void *arg) { struct folio *folio = virt_to_folio(arg); kunmap_local(arg); folio_put(folio); } const char *afs_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { struct afs_vnode *vnode = AFS_FS_I(inode); struct folio *folio; char *content; ssize_t ret; if (!dentry) { /* RCU pathwalk. */ if (!test_bit(AFS_VNODE_DIR_READ, &vnode->flags) || !afs_check_validity(vnode)) return ERR_PTR(-ECHILD); goto good; } if (test_bit(AFS_VNODE_DIR_READ, &vnode->flags)) goto fetch; ret = afs_validate(vnode, NULL); if (ret < 0) return ERR_PTR(ret); if (!test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) && test_bit(AFS_VNODE_DIR_READ, &vnode->flags)) goto good; fetch: ret = afs_read_single(vnode, NULL); if (ret < 0) return ERR_PTR(ret); set_bit(AFS_VNODE_DIR_READ, &vnode->flags); good: folio = folioq_folio(vnode->directory, 0); folio_get(folio); content = kmap_local_folio(folio, 0); set_delayed_call(callback, afs_put_link, content); return content; } int afs_readlink(struct dentry *dentry, char __user *buffer, int buflen) { DEFINE_DELAYED_CALL(done); const char *content; int len; content = afs_get_link(dentry, d_inode(dentry), &done); if (IS_ERR(content)) { do_delayed_call(&done); return PTR_ERR(content); } len = umin(strlen(content), buflen); if (copy_to_user(buffer, content, len)) len = -EFAULT; do_delayed_call(&done); return len; } static const struct inode_operations afs_symlink_inode_operations = { .get_link = afs_get_link, .readlink = afs_readlink, }; static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *parent_vnode) { static unsigned long once_only; pr_warn("kAFS: AFS vnode with undefined type %u\n", vnode->status.type); pr_warn("kAFS: A=%d m=%o s=%llx v=%llx\n", vnode->status.abort_code, vnode->status.mode, vnode->status.size, vnode->status.data_version); pr_warn("kAFS: vnode %llx:%llx:%x\n", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); if (parent_vnode) pr_warn("kAFS: dir %llx:%llx:%x\n", parent_vnode->fid.vid, parent_vnode->fid.vnode, parent_vnode->fid.unique); if (!test_and_set_bit(0, &once_only)) dump_stack(); } /* * Set parameters for the netfs library */ static void 
afs_set_netfs_context(struct afs_vnode *vnode) { netfs_inode_init(&vnode->netfs, &afs_req_ops, true); } /* * Initialise an inode from the vnode status. */ static int afs_inode_init_from_status(struct afs_operation *op, struct afs_vnode_param *vp, struct afs_vnode *vnode) { struct afs_file_status *status = &vp->scb.status; struct inode *inode = AFS_VNODE_TO_I(vnode); struct timespec64 t; _enter("{%llx:%llu.%u} %s", vp->fid.vid, vp->fid.vnode, vp->fid.unique, op->type ? op->type->name : "???"); _debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu", status->type, status->nlink, (unsigned long long) status->size, status->data_version, status->mode); write_seqlock(&vnode->cb_lock); vnode->cb_v_check = op->cb_v_break; vnode->status = *status; t = status->mtime_client; inode_set_ctime_to_ts(inode, t); inode_set_mtime_to_ts(inode, t); inode_set_atime_to_ts(inode, t); inode->i_flags |= S_NOATIME; inode->i_uid = make_kuid(&init_user_ns, status->owner); inode->i_gid = make_kgid(&init_user_ns, status->group); set_nlink(&vnode->netfs.inode, status->nlink); switch (status->type) { case AFS_FTYPE_FILE: inode->i_mode = S_IFREG | (status->mode & S_IALLUGO); inode->i_op = &afs_file_inode_operations; inode->i_fop = &afs_file_operations; inode->i_mapping->a_ops = &afs_file_aops; mapping_set_large_folios(inode->i_mapping); break; case AFS_FTYPE_DIR: inode->i_mode = S_IFDIR | (status->mode & S_IALLUGO); inode->i_op = &afs_dir_inode_operations; inode->i_fop = &afs_dir_file_operations; inode->i_mapping->a_ops = &afs_dir_aops; __set_bit(NETFS_ICTX_SINGLE_NO_UPLOAD, &vnode->netfs.flags); /* Assume locally cached directory data will be valid. */ __set_bit(AFS_VNODE_DIR_VALID, &vnode->flags); break; case AFS_FTYPE_SYMLINK: /* Symlinks with a mode of 0644 are actually mountpoints. */ if ((status->mode & 0777) == 0644) { inode->i_flags |= S_AUTOMOUNT; set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); inode->i_mode = S_IFDIR | 0555; inode->i_op = &afs_mntpt_inode_operations; inode->i_fop = &afs_mntpt_file_operations; } else { inode->i_mode = S_IFLNK | status->mode; inode->i_op = &afs_symlink_inode_operations; } inode->i_mapping->a_ops = &afs_dir_aops; inode_nohighmem(inode); mapping_set_release_always(inode->i_mapping); break; default: dump_vnode(vnode, op->file[0].vnode != vnode ? op->file[0].vnode : NULL); write_sequnlock(&vnode->cb_lock); return afs_protocol_error(NULL, afs_eproto_file_type); } afs_set_i_size(vnode, status->size); afs_set_netfs_context(vnode); vnode->invalid_before = status->data_version; trace_afs_set_dv(vnode, status->data_version); inode_set_iversion_raw(&vnode->netfs.inode, status->data_version); if (!vp->scb.have_cb) { /* it's a symlink we just created (the fileserver * didn't give us a callback) */ afs_clear_cb_promise(vnode, afs_cb_promise_set_new_symlink); } else { vnode->cb_server = op->server; afs_set_cb_promise(vnode, vp->scb.callback.expires_at, afs_cb_promise_set_new_inode); } write_sequnlock(&vnode->cb_lock); return 0; } /* * Update the core inode struct from a returned status record. */ static void afs_apply_status(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_file_status *status = &vp->scb.status; struct afs_vnode *vnode = vp->vnode; struct inode *inode = &vnode->netfs.inode; struct timespec64 t; umode_t mode; bool unexpected_jump = false; bool data_changed = false; bool change_size = vp->set_size; _enter("{%llx:%llu.%u} %s", vp->fid.vid, vp->fid.vnode, vp->fid.unique, op->type ? 
op->type->name : "???"); BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags)); if (status->type != vnode->status.type) { pr_warn("Vnode %llx:%llx:%x changed type %u to %u\n", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, status->type, vnode->status.type); afs_protocol_error(NULL, afs_eproto_bad_status); return; } if (status->nlink != vnode->status.nlink) set_nlink(inode, status->nlink); if (status->owner != vnode->status.owner) inode->i_uid = make_kuid(&init_user_ns, status->owner); if (status->group != vnode->status.group) inode->i_gid = make_kgid(&init_user_ns, status->group); if (status->mode != vnode->status.mode) { mode = inode->i_mode; mode &= ~S_IALLUGO; mode |= status->mode & S_IALLUGO; WRITE_ONCE(inode->i_mode, mode); } t = status->mtime_client; inode_set_mtime_to_ts(inode, t); if (vp->update_ctime) inode_set_ctime_to_ts(inode, op->ctime); if (vnode->status.data_version != status->data_version) { trace_afs_set_dv(vnode, status->data_version); data_changed = true; } vnode->status = *status; if (vp->dv_before + vp->dv_delta != status->data_version) { trace_afs_dv_mismatch(vnode, vp->dv_before, vp->dv_delta, status->data_version); if (vnode->cb_ro_snapshot == atomic_read(&vnode->volume->cb_ro_snapshot) && atomic64_read(&vnode->cb_expires_at) != AFS_NO_CB_PROMISE) pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s (op=%x)\n", vnode->fid.vid, vnode->fid.vnode, (unsigned long long)vp->dv_before + vp->dv_delta, (unsigned long long)status->data_version, op->type ? op->type->name : "???", op->debug_id); vnode->invalid_before = status->data_version; if (vnode->status.type == AFS_FTYPE_DIR) afs_invalidate_dir(vnode, afs_dir_invalid_dv_mismatch); else set_bit(AFS_VNODE_ZAP_DATA, &vnode->flags); change_size = true; data_changed = true; unexpected_jump = true; } else if (vnode->status.type == AFS_FTYPE_DIR) { /* Expected directory change is handled elsewhere so * that we can locally edit the directory and save on a * download. */ if (test_bit(AFS_VNODE_DIR_VALID, &vnode->flags)) data_changed = false; change_size = true; } if (data_changed) { inode_set_iversion_raw(inode, status->data_version); /* Only update the size if the data version jumped. If the * file is being modified locally, then we might have our own * idea of what the size should be that's not the same as * what's on the server. */ vnode->netfs.remote_i_size = status->size; if (change_size || status->size > i_size_read(inode)) { afs_set_i_size(vnode, status->size); if (unexpected_jump) vnode->netfs.zero_point = status->size; inode_set_ctime_to_ts(inode, t); inode_set_atime_to_ts(inode, t); } if (op->ops == &afs_fetch_data_operation) op->fetch.subreq->rreq->i_size = status->size; } } /* * Apply a callback to a vnode. */ static void afs_apply_callback(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_callback *cb = &vp->scb.callback; struct afs_vnode *vnode = vp->vnode; if (!afs_cb_is_broken(vp->cb_break_before, vnode)) { if (op->volume->type == AFSVL_RWVOL) vnode->cb_server = op->server; afs_set_cb_promise(vnode, cb->expires_at, afs_cb_promise_set_apply_cb); } } /* * Apply the received status and callback to an inode all in the same critical * section to avoid races with afs_validate(). */ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_vnode *vnode = vp->vnode; _enter(""); write_seqlock(&vnode->cb_lock); if (vp->scb.have_error) { /* A YFS server will return this from RemoveFile2 and AFS and * YFS will return this from InlineBulkStatus. 
*/ if (vp->scb.status.abort_code == VNOVNODE) { set_bit(AFS_VNODE_DELETED, &vnode->flags); clear_nlink(&vnode->netfs.inode); __afs_break_callback(vnode, afs_cb_break_for_deleted); op->flags &= ~AFS_OPERATION_DIR_CONFLICT; } } else if (vp->scb.have_status) { if (vp->speculative && (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) || vp->dv_before != vnode->status.data_version)) /* Ignore the result of a speculative bulk status fetch * if it splits around a modification op, thereby * appearing to regress the data version. */ goto out; afs_apply_status(op, vp); if (vp->scb.have_cb) afs_apply_callback(op, vp); } else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) { drop_nlink(&vnode->netfs.inode); if (vnode->netfs.inode.i_nlink == 0) { set_bit(AFS_VNODE_DELETED, &vnode->flags); __afs_break_callback(vnode, afs_cb_break_for_deleted); } } out: write_sequnlock(&vnode->cb_lock); if (vp->scb.have_status) afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb); } static void afs_fetch_status_success(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[op->fetch_status.which]; struct afs_vnode *vnode = vp->vnode; int ret; if (vnode->netfs.inode.i_state & I_NEW) { ret = afs_inode_init_from_status(op, vp, vnode); afs_op_set_error(op, ret); if (ret == 0) afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb); } else { afs_vnode_commit_status(op, vp); } } const struct afs_operation_ops afs_fetch_status_operation = { .issue_afs_rpc = afs_fs_fetch_status, .issue_yfs_rpc = yfs_fs_fetch_status, .success = afs_fetch_status_success, .aborted = afs_check_for_remote_deletion, }; /* * Fetch file status from the volume. */ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool is_new, afs_access_t *_caller_access) { struct afs_operation *op; _enter("%s,{%llx:%llu.%u,S=%lx}", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, vnode->flags); op = afs_alloc_operation(key, vnode->volume); if (IS_ERR(op)) return PTR_ERR(op); afs_op_set_vnode(op, 0, vnode); op->nr_files = 1; op->ops = &afs_fetch_status_operation; afs_begin_vnode_operation(op); afs_wait_for_operation(op); if (_caller_access) *_caller_access = op->file[0].scb.status.caller_access; return afs_put_operation(op); } /* * ilookup() comparator */ int afs_ilookup5_test_by_fid(struct inode *inode, void *opaque) { struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_fid *fid = opaque; return (fid->vnode == vnode->fid.vnode && fid->vnode_hi == vnode->fid.vnode_hi && fid->unique == vnode->fid.unique); } /* * iget5() comparator */ static int afs_iget5_test(struct inode *inode, void *opaque) { struct afs_vnode_param *vp = opaque; //struct afs_vnode *vnode = AFS_FS_I(inode); return afs_ilookup5_test_by_fid(inode, &vp->fid); } /* * iget5() inode initialiser */ static int afs_iget5_set(struct inode *inode, void *opaque) { struct afs_vnode_param *vp = opaque; struct afs_super_info *as = AFS_FS_S(inode->i_sb); struct afs_vnode *vnode = AFS_FS_I(inode); vnode->volume = as->volume; vnode->fid = vp->fid; /* YFS supports 96-bit vnode IDs, but Linux only supports * 64-bit inode numbers. */ inode->i_ino = vnode->fid.vnode; inode->i_generation = vnode->fid.unique; return 0; } /* * Get a cache cookie for an inode. 
*/ static void afs_get_inode_cache(struct afs_vnode *vnode) { #ifdef CONFIG_AFS_FSCACHE struct { __be32 vnode_id; __be32 unique; __be32 vnode_id_ext[2]; /* Allow for a 96-bit key */ } __packed key; struct afs_vnode_cache_aux aux; if (vnode->status.type != AFS_FTYPE_FILE && vnode->status.type != AFS_FTYPE_DIR && vnode->status.type != AFS_FTYPE_SYMLINK) { vnode->netfs.cache = NULL; return; } key.vnode_id = htonl(vnode->fid.vnode); key.unique = htonl(vnode->fid.unique); key.vnode_id_ext[0] = htonl(vnode->fid.vnode >> 32); key.vnode_id_ext[1] = htonl(vnode->fid.vnode_hi); afs_set_cache_aux(vnode, &aux); afs_vnode_set_cache(vnode, fscache_acquire_cookie( vnode->volume->cache, vnode->status.type == AFS_FTYPE_FILE ? 0 : FSCACHE_ADV_SINGLE_CHUNK, &key, sizeof(key), &aux, sizeof(aux), i_size_read(&vnode->netfs.inode))); #endif } /* * inode retrieval */ struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_vnode_param *dvp = &op->file[0]; struct super_block *sb = dvp->vnode->netfs.inode.i_sb; struct afs_vnode *vnode; struct inode *inode; int ret; _enter(",{%llx:%llu.%u},,", vp->fid.vid, vp->fid.vnode, vp->fid.unique); inode = iget5_locked(sb, vp->fid.vnode, afs_iget5_test, afs_iget5_set, vp); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } vnode = AFS_FS_I(inode); _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }", inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); /* deal with an existing inode */ if (!(inode->i_state & I_NEW)) { _leave(" = %p", inode); return inode; } ret = afs_inode_init_from_status(op, vp, vnode); if (ret < 0) goto bad_inode; afs_get_inode_cache(vnode); /* success */ clear_bit(AFS_VNODE_UNSET, &vnode->flags); unlock_new_inode(inode); _leave(" = %p", inode); return inode; /* failure */ bad_inode: iget_failed(inode); _leave(" = %d [bad]", ret); return ERR_PTR(ret); } static int afs_iget5_set_root(struct inode *inode, void *opaque) { struct afs_super_info *as = AFS_FS_S(inode->i_sb); struct afs_vnode *vnode = AFS_FS_I(inode); vnode->volume = as->volume; vnode->fid.vid = as->volume->vid; vnode->fid.vnode = 1; vnode->fid.unique = 1; inode->i_ino = 1; inode->i_generation = 1; return 0; } /* * Set up the root inode for a volume. This is always vnode 1, unique 1 within * the volume. 
*/ struct inode *afs_root_iget(struct super_block *sb, struct key *key) { struct afs_super_info *as = AFS_FS_S(sb); struct afs_operation *op; struct afs_vnode *vnode; struct inode *inode; int ret; _enter(",{%llx},,", as->volume->vid); inode = iget5_locked(sb, 1, NULL, afs_iget5_set_root, NULL); if (!inode) { _leave(" = -ENOMEM"); return ERR_PTR(-ENOMEM); } _debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid); BUG_ON(!(inode->i_state & I_NEW)); vnode = AFS_FS_I(inode); vnode->cb_v_check = atomic_read(&as->volume->cb_v_break); afs_set_netfs_context(vnode); op = afs_alloc_operation(key, as->volume); if (IS_ERR(op)) { ret = PTR_ERR(op); goto error; } afs_op_set_vnode(op, 0, vnode); op->nr_files = 1; op->ops = &afs_fetch_status_operation; ret = afs_do_sync_operation(op); if (ret < 0) goto error; afs_get_inode_cache(vnode); clear_bit(AFS_VNODE_UNSET, &vnode->flags); unlock_new_inode(inode); _leave(" = %p", inode); return inode; error: iget_failed(inode); _leave(" = %d [bad]", ret); return ERR_PTR(ret); } /* * read the attributes of an inode */ int afs_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat, u32 request_mask, unsigned int query_flags) { struct inode *inode = d_inode(path->dentry); struct afs_vnode *vnode = AFS_FS_I(inode); struct key *key; int ret, seq; _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation); if (vnode->volume && !(query_flags & AT_STATX_DONT_SYNC) && atomic64_read(&vnode->cb_expires_at) == AFS_NO_CB_PROMISE) { key = afs_request_key(vnode->volume->cell); if (IS_ERR(key)) return PTR_ERR(key); ret = afs_validate(vnode, key); key_put(key); if (ret < 0) return ret; } do { seq = read_seqbegin(&vnode->cb_lock); generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat); if (test_bit(AFS_VNODE_SILLY_DELETED, &vnode->flags) && stat->nlink > 0) stat->nlink -= 1; /* Lie about the size of directories. We maintain a locally * edited copy and may make different allocation decisions on * it, but we need to give userspace the server's size. 
*/ if (S_ISDIR(inode->i_mode)) stat->size = vnode->netfs.remote_i_size; } while (read_seqretry(&vnode->cb_lock, seq)); return 0; } /* * discard an AFS inode */ int afs_drop_inode(struct inode *inode) { _enter(""); if (test_bit(AFS_VNODE_PSEUDODIR, &AFS_FS_I(inode)->flags)) return generic_delete_inode(inode); else return generic_drop_inode(inode); } /* * clear an AFS inode */ void afs_evict_inode(struct inode *inode) { struct afs_vnode_cache_aux aux; struct afs_super_info *sbi = AFS_FS_S(inode->i_sb); struct afs_vnode *vnode = AFS_FS_I(inode); _enter("{%llx:%llu.%d}", vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); _debug("CLEAR INODE %p", inode); ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode); if ((S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && (inode->i_state & I_DIRTY) && !sbi->dyn_root) { struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .for_sync = true, .range_end = LLONG_MAX, }; afs_single_writepages(inode->i_mapping, &wbc); } netfs_wait_for_outstanding_io(inode); truncate_inode_pages_final(&inode->i_data); netfs_free_folioq_buffer(vnode->directory); afs_set_cache_aux(vnode, &aux); netfs_clear_inode_writeback(inode, &aux); clear_inode(inode); while (!list_empty(&vnode->wb_keys)) { struct afs_wb_key *wbk = list_entry(vnode->wb_keys.next, struct afs_wb_key, vnode_link); list_del(&wbk->vnode_link); afs_put_wb_key(wbk); } fscache_relinquish_cookie(afs_vnode_cache(vnode), test_bit(AFS_VNODE_DELETED, &vnode->flags)); afs_prune_wb_keys(vnode); afs_put_permits(rcu_access_pointer(vnode->permit_cache)); key_put(vnode->silly_key); vnode->silly_key = NULL; key_put(vnode->lock_key); vnode->lock_key = NULL; _leave(""); } static void afs_setattr_success(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct inode *inode = &vp->vnode->netfs.inode; loff_t old_i_size = i_size_read(inode); op->setattr.old_i_size = old_i_size; afs_vnode_commit_status(op, vp); /* inode->i_size has now been changed. */ if (op->setattr.attr->ia_valid & ATTR_SIZE) { loff_t size = op->setattr.attr->ia_size; if (size > old_i_size) pagecache_isize_extended(inode, old_i_size, size); } } static void afs_setattr_edit_file(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_vnode *vnode = vp->vnode; struct inode *inode = &vnode->netfs.inode; if (op->setattr.attr->ia_valid & ATTR_SIZE) { loff_t size = op->setattr.attr->ia_size; loff_t old = op->setattr.old_i_size; /* Note: inode->i_size was updated by afs_apply_status() inside * the I/O and callback locks. 
*/ if (size != old) { truncate_pagecache(inode, size); netfs_resize_file(&vnode->netfs, size, true); fscache_resize_cookie(afs_vnode_cache(vnode), size); } } } static const struct afs_operation_ops afs_setattr_operation = { .issue_afs_rpc = afs_fs_setattr, .issue_yfs_rpc = yfs_fs_setattr, .success = afs_setattr_success, .edit_dir = afs_setattr_edit_file, }; /* * set the attributes of an inode */ int afs_setattr(struct mnt_idmap *idmap, struct dentry *dentry, struct iattr *attr) { const unsigned int supported = ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH; struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); struct inode *inode = &vnode->netfs.inode; loff_t i_size; int ret; _enter("{%llx:%llu},{n=%pd},%x", vnode->fid.vid, vnode->fid.vnode, dentry, attr->ia_valid); if (!(attr->ia_valid & supported)) { _leave(" = 0 [unsupported]"); return 0; } i_size = i_size_read(inode); if (attr->ia_valid & ATTR_SIZE) { if (!S_ISREG(inode->i_mode)) return -EISDIR; ret = inode_newsize_ok(inode, attr->ia_size); if (ret) return ret; if (attr->ia_size == i_size) attr->ia_valid &= ~ATTR_SIZE; } fscache_use_cookie(afs_vnode_cache(vnode), true); /* Prevent any new writebacks from starting whilst we do this. */ down_write(&vnode->validate_lock); if ((attr->ia_valid & ATTR_SIZE) && S_ISREG(inode->i_mode)) { loff_t size = attr->ia_size; /* Wait for any outstanding writes to the server to complete */ loff_t from = min(size, i_size); loff_t to = max(size, i_size); ret = filemap_fdatawait_range(inode->i_mapping, from, to); if (ret < 0) goto out_unlock; /* Don't talk to the server if we're just shortening in-memory * writes that haven't gone to the server yet. */ if (!(attr->ia_valid & (supported & ~ATTR_SIZE & ~ATTR_MTIME)) && attr->ia_size < i_size && attr->ia_size > vnode->netfs.remote_i_size) { truncate_setsize(inode, attr->ia_size); netfs_resize_file(&vnode->netfs, size, false); fscache_resize_cookie(afs_vnode_cache(vnode), attr->ia_size); ret = 0; goto out_unlock; } } op = afs_alloc_operation(((attr->ia_valid & ATTR_FILE) ? afs_file_key(attr->ia_file) : NULL), vnode->volume); if (IS_ERR(op)) { ret = PTR_ERR(op); goto out_unlock; } afs_op_set_vnode(op, 0, vnode); op->setattr.attr = attr; if (attr->ia_valid & ATTR_SIZE) { op->file[0].dv_delta = 1; op->file[0].set_size = true; } op->ctime = attr->ia_ctime; op->file[0].update_ctime = 1; op->file[0].modification = true; op->ops = &afs_setattr_operation; ret = afs_do_sync_operation(op); out_unlock: up_write(&vnode->validate_lock); fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL); _leave(" = %d", ret); return ret; } |
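/*
 * A self-contained sketch (not part of fs/afs) of the seqlock pattern
 * that afs_getattr() above relies on when copying attributes out of the
 * vnode: the writer publishes updates under write_seqlock(), and readers
 * retry until they observe a consistent snapshot.  The example_stat
 * structure and its fields are invented for illustration.
 */
#include <linux/seqlock.h>
#include <linux/types.h>

struct example_stat {
	seqlock_t	lock;
	u64		size;
	u32		nlink;
};

static void example_stat_init(struct example_stat *st)
{
	seqlock_init(&st->lock);
}

static void example_stat_update(struct example_stat *st, u64 size, u32 nlink)
{
	write_seqlock(&st->lock);	/* excludes writers, bumps the sequence */
	st->size = size;
	st->nlink = nlink;
	write_sequnlock(&st->lock);
}

static void example_stat_read(struct example_stat *st, u64 *size, u32 *nlink)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&st->lock);		/* sample the sequence */
		*size = st->size;
		*nlink = st->nlink;
	} while (read_seqretry(&st->lock, seq));	/* retry if a writer ran */
}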
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _DELAYED_CALL_H
#define _DELAYED_CALL_H

/*
 * Poor man's closures; I wish we could've done them sanely polymorphic,
 * but...
 */

struct delayed_call {
	void (*fn)(void *);
	void *arg;
};

#define DEFINE_DELAYED_CALL(name) struct delayed_call name = {NULL, NULL}

/* I really wish we had closures with sane typechecking... */
static inline void set_delayed_call(struct delayed_call *call,
		void (*fn)(void *), void *arg)
{
	call->fn = fn;
	call->arg = arg;
}

static inline void do_delayed_call(struct delayed_call *call)
{
	if (call->fn)
		call->fn(call->arg);
}

static inline void clear_delayed_call(struct delayed_call *call)
{
	call->fn = NULL;
}

#endif
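/*
 * A minimal sketch (not part of the header above) of the delayed_call
 * contract used by afs_get_link()/afs_readlink() earlier: the producer
 * registers a cleanup with set_delayed_call(), and the consumer runs it
 * with do_delayed_call() once it has finished with the returned buffer.
 * The example_* helpers are invented for illustration.
 */
#include <linux/delayed_call.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static void example_free(void *arg)
{
	kfree(arg);			/* deferred until the caller is done */
}

static const char *example_get_string(struct delayed_call *callback)
{
	char *s = kstrdup("example", GFP_KERNEL);

	if (!s)
		return ERR_PTR(-ENOMEM);
	set_delayed_call(callback, example_free, s);
	return s;
}

static int example_use_string(void)
{
	DEFINE_DELAYED_CALL(done);
	const char *s = example_get_string(&done);
	int len;

	if (IS_ERR(s))
		return PTR_ERR(s);
	len = strlen(s);
	do_delayed_call(&done);		/* invokes example_free(s) */
	return len;
}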
// SPDX-License-Identifier: GPL-2.0-only #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/workqueue.h> #include <linux/rtnetlink.h> #include
<linux/cache.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/idr.h> #include <linux/rculist.h> #include <linux/nsproxy.h> #include <linux/fs.h> #include <linux/proc_ns.h> #include <linux/file.h> #include <linux/export.h> #include <linux/user_namespace.h> #include <linux/net_namespace.h> #include <linux/sched/task.h> #include <linux/uidgid.h> #include <linux/proc_fs.h> #include <net/aligned_data.h> #include <net/sock.h> #include <net/netlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> /* * Our network namespace constructor/destructor lists */ static LIST_HEAD(pernet_list); static struct list_head *first_device = &pernet_list; LIST_HEAD(net_namespace_list); EXPORT_SYMBOL_GPL(net_namespace_list); /* Protects net_namespace_list. Nests iside rtnl_lock() */ DECLARE_RWSEM(net_rwsem); EXPORT_SYMBOL_GPL(net_rwsem); #ifdef CONFIG_KEYS static struct key_tag init_net_key_domain = { .usage = REFCOUNT_INIT(1) }; #endif struct net init_net; EXPORT_SYMBOL(init_net); static bool init_net_initialized; /* * pernet_ops_rwsem: protects: pernet_list, net_generic_ids, * init_net_initialized and first_device pointer. * This is internal net namespace object. Please, don't use it * outside. */ DECLARE_RWSEM(pernet_ops_rwsem); #define MIN_PERNET_OPS_ID \ ((sizeof(struct net_generic) + sizeof(void *) - 1) / sizeof(void *)) #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS; static struct net_generic *net_alloc_generic(void) { unsigned int gen_ptrs = READ_ONCE(max_gen_ptrs); unsigned int generic_size; struct net_generic *ng; generic_size = offsetof(struct net_generic, ptr[gen_ptrs]); ng = kzalloc(generic_size, GFP_KERNEL); if (ng) ng->s.len = gen_ptrs; return ng; } static int net_assign_generic(struct net *net, unsigned int id, void *data) { struct net_generic *ng, *old_ng; BUG_ON(id < MIN_PERNET_OPS_ID); old_ng = rcu_dereference_protected(net->gen, lockdep_is_held(&pernet_ops_rwsem)); if (old_ng->s.len > id) { old_ng->ptr[id] = data; return 0; } ng = net_alloc_generic(); if (!ng) return -ENOMEM; /* * Some synchronisation notes: * * The net_generic explores the net->gen array inside rcu * read section. Besides once set the net->gen->ptr[x] * pointer never changes (see rules in netns/generic.h). * * That said, we simply duplicate this array and schedule * the old copy for kfree after a grace period. 
*/ memcpy(&ng->ptr[MIN_PERNET_OPS_ID], &old_ng->ptr[MIN_PERNET_OPS_ID], (old_ng->s.len - MIN_PERNET_OPS_ID) * sizeof(void *)); ng->ptr[id] = data; rcu_assign_pointer(net->gen, ng); kfree_rcu(old_ng, s.rcu); return 0; } static int ops_init(const struct pernet_operations *ops, struct net *net) { struct net_generic *ng; int err = -ENOMEM; void *data = NULL; if (ops->id) { data = kzalloc(ops->size, GFP_KERNEL); if (!data) goto out; err = net_assign_generic(net, *ops->id, data); if (err) goto cleanup; } err = 0; if (ops->init) err = ops->init(net); if (!err) return 0; if (ops->id) { ng = rcu_dereference_protected(net->gen, lockdep_is_held(&pernet_ops_rwsem)); ng->ptr[*ops->id] = NULL; } cleanup: kfree(data); out: return err; } static void ops_pre_exit_list(const struct pernet_operations *ops, struct list_head *net_exit_list) { struct net *net; if (ops->pre_exit) { list_for_each_entry(net, net_exit_list, exit_list) ops->pre_exit(net); } } static void ops_exit_rtnl_list(const struct list_head *ops_list, const struct pernet_operations *ops, struct list_head *net_exit_list) { const struct pernet_operations *saved_ops = ops; LIST_HEAD(dev_kill_list); struct net *net; rtnl_lock(); list_for_each_entry(net, net_exit_list, exit_list) { __rtnl_net_lock(net); ops = saved_ops; list_for_each_entry_continue_reverse(ops, ops_list, list) { if (ops->exit_rtnl) ops->exit_rtnl(net, &dev_kill_list); } __rtnl_net_unlock(net); } unregister_netdevice_many(&dev_kill_list); rtnl_unlock(); } static void ops_exit_list(const struct pernet_operations *ops, struct list_head *net_exit_list) { if (ops->exit) { struct net *net; list_for_each_entry(net, net_exit_list, exit_list) { ops->exit(net); cond_resched(); } } if (ops->exit_batch) ops->exit_batch(net_exit_list); } static void ops_free_list(const struct pernet_operations *ops, struct list_head *net_exit_list) { struct net *net; if (ops->id) { list_for_each_entry(net, net_exit_list, exit_list) kfree(net_generic(net, *ops->id)); } } static void ops_undo_list(const struct list_head *ops_list, const struct pernet_operations *ops, struct list_head *net_exit_list, bool expedite_rcu) { const struct pernet_operations *saved_ops; bool hold_rtnl = false; if (!ops) ops = list_entry(ops_list, typeof(*ops), list); saved_ops = ops; list_for_each_entry_continue_reverse(ops, ops_list, list) { hold_rtnl |= !!ops->exit_rtnl; ops_pre_exit_list(ops, net_exit_list); } /* Another CPU might be rcu-iterating the list, wait for it. * This needs to be before calling the exit() notifiers, so the * rcu_barrier() after ops_undo_list() isn't sufficient alone. * Also the pre_exit() and exit() methods need this barrier. */ if (expedite_rcu) synchronize_rcu_expedited(); else synchronize_rcu(); if (hold_rtnl) ops_exit_rtnl_list(ops_list, saved_ops, net_exit_list); ops = saved_ops; list_for_each_entry_continue_reverse(ops, ops_list, list) ops_exit_list(ops, net_exit_list); ops = saved_ops; list_for_each_entry_continue_reverse(ops, ops_list, list) ops_free_list(ops, net_exit_list); } static void ops_undo_single(struct pernet_operations *ops, struct list_head *net_exit_list) { LIST_HEAD(ops_list); list_add(&ops->list, &ops_list); ops_undo_list(&ops_list, NULL, net_exit_list, false); list_del(&ops->list); } /* should be called with nsid_lock held */ static int alloc_netid(struct net *net, struct net *peer, int reqid) { int min = 0, max = 0; if (reqid >= 0) { min = reqid; max = reqid + 1; } return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC); } /* This function is used by idr_for_each(). 
If net is equal to peer, the * function returns the id so that idr_for_each() stops. Because we cannot * returns the id 0 (idr_for_each() will not stop), we return the magic value * NET_ID_ZERO (-1) for it. */ #define NET_ID_ZERO -1 static int net_eq_idr(int id, void *net, void *peer) { if (net_eq(net, peer)) return id ? : NET_ID_ZERO; return 0; } /* Must be called from RCU-critical section or with nsid_lock held */ static int __peernet2id(const struct net *net, struct net *peer) { int id = idr_for_each(&net->netns_ids, net_eq_idr, peer); /* Magic value for id 0. */ if (id == NET_ID_ZERO) return 0; if (id > 0) return id; return NETNSA_NSID_NOT_ASSIGNED; } static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid, struct nlmsghdr *nlh, gfp_t gfp); /* This function returns the id of a peer netns. If no id is assigned, one will * be allocated and returned. */ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp) { int id; if (refcount_read(&net->ns.count) == 0) return NETNSA_NSID_NOT_ASSIGNED; spin_lock(&net->nsid_lock); id = __peernet2id(net, peer); if (id >= 0) { spin_unlock(&net->nsid_lock); return id; } /* When peer is obtained from RCU lists, we may race with * its cleanup. Check whether it's alive, and this guarantees * we never hash a peer back to net->netns_ids, after it has * just been idr_remove()'d from there in cleanup_net(). */ if (!maybe_get_net(peer)) { spin_unlock(&net->nsid_lock); return NETNSA_NSID_NOT_ASSIGNED; } id = alloc_netid(net, peer, -1); spin_unlock(&net->nsid_lock); put_net(peer); if (id < 0) return NETNSA_NSID_NOT_ASSIGNED; rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp); return id; } EXPORT_SYMBOL_GPL(peernet2id_alloc); /* This function returns, if assigned, the id of a peer netns. */ int peernet2id(const struct net *net, struct net *peer) { int id; rcu_read_lock(); id = __peernet2id(net, peer); rcu_read_unlock(); return id; } EXPORT_SYMBOL(peernet2id); /* This function returns true is the peer netns has an id assigned into the * current netns. */ bool peernet_has_id(const struct net *net, struct net *peer) { return peernet2id(net, peer) >= 0; } struct net *get_net_ns_by_id(const struct net *net, int id) { struct net *peer; if (id < 0) return NULL; rcu_read_lock(); peer = idr_find(&net->netns_ids, id); if (peer) peer = maybe_get_net(peer); rcu_read_unlock(); return peer; } EXPORT_SYMBOL_GPL(get_net_ns_by_id); static __net_init void preinit_net_sysctl(struct net *net) { net->core.sysctl_somaxconn = SOMAXCONN; /* Limits per socket sk_omem_alloc usage. * TCP zerocopy regular usage needs 128 KB. */ net->core.sysctl_optmem_max = 128 * 1024; net->core.sysctl_txrehash = SOCK_TXREHASH_ENABLED; net->core.sysctl_tstamp_allow_data = 1; } /* init code that must occur even if setup_net() is not called. 
*/ static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns) { refcount_set(&net->passive, 1); refcount_set(&net->ns.count, 1); ref_tracker_dir_init(&net->refcnt_tracker, 128, "net_refcnt"); ref_tracker_dir_init(&net->notrefcnt_tracker, 128, "net_notrefcnt"); get_random_bytes(&net->hash_mix, sizeof(u32)); net->dev_base_seq = 1; net->user_ns = user_ns; idr_init(&net->netns_ids); spin_lock_init(&net->nsid_lock); mutex_init(&net->ipv4.ra_mutex); #ifdef CONFIG_DEBUG_NET_SMALL_RTNL mutex_init(&net->rtnl_mutex); lock_set_cmp_fn(&net->rtnl_mutex, rtnl_net_lock_cmp_fn, NULL); #endif INIT_LIST_HEAD(&net->ptype_all); INIT_LIST_HEAD(&net->ptype_specific); preinit_net_sysctl(net); } /* * setup_net runs the initializers for the network namespace object. */ static __net_init int setup_net(struct net *net) { /* Must be called with pernet_ops_rwsem held */ const struct pernet_operations *ops; LIST_HEAD(net_exit_list); int error = 0; net->net_cookie = atomic64_inc_return(&net_aligned_data.net_cookie); list_for_each_entry(ops, &pernet_list, list) { error = ops_init(ops, net); if (error < 0) goto out_undo; } down_write(&net_rwsem); list_add_tail_rcu(&net->list, &net_namespace_list); up_write(&net_rwsem); out: return error; out_undo: /* Walk through the list backwards calling the exit functions * for the pernet modules whose init functions did not fail. */ list_add(&net->exit_list, &net_exit_list); ops_undo_list(&pernet_list, ops, &net_exit_list, false); rcu_barrier(); goto out; } #ifdef CONFIG_NET_NS static struct ucounts *inc_net_namespaces(struct user_namespace *ns) { return inc_ucount(ns, current_euid(), UCOUNT_NET_NAMESPACES); } static void dec_net_namespaces(struct ucounts *ucounts) { dec_ucount(ucounts, UCOUNT_NET_NAMESPACES); } static struct kmem_cache *net_cachep __ro_after_init; static struct workqueue_struct *netns_wq; static struct net *net_alloc(void) { struct net *net = NULL; struct net_generic *ng; ng = net_alloc_generic(); if (!ng) goto out; net = kmem_cache_zalloc(net_cachep, GFP_KERNEL); if (!net) goto out_free; #ifdef CONFIG_KEYS net->key_domain = kzalloc(sizeof(struct key_tag), GFP_KERNEL); if (!net->key_domain) goto out_free_2; refcount_set(&net->key_domain->usage, 1); #endif rcu_assign_pointer(net->gen, ng); out: return net; #ifdef CONFIG_KEYS out_free_2: kmem_cache_free(net_cachep, net); net = NULL; #endif out_free: kfree(ng); goto out; } static LLIST_HEAD(defer_free_list); static void net_complete_free(void) { struct llist_node *kill_list; struct net *net, *next; /* Get the list of namespaces to free from last round. */ kill_list = llist_del_all(&defer_free_list); llist_for_each_entry_safe(net, next, kill_list, defer_free_list) kmem_cache_free(net_cachep, net); } void net_passive_dec(struct net *net) { if (refcount_dec_and_test(&net->passive)) { kfree(rcu_access_pointer(net->gen)); /* There should not be any trackers left there. */ ref_tracker_dir_exit(&net->notrefcnt_tracker); /* Wait for an extra rcu_barrier() before final free. 
*/ llist_add(&net->defer_free_list, &defer_free_list); } } void net_drop_ns(void *p) { struct net *net = (struct net *)p; if (net) net_passive_dec(net); } struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns, struct net *old_net) { struct ucounts *ucounts; struct net *net; int rv; if (!(flags & CLONE_NEWNET)) return get_net(old_net); ucounts = inc_net_namespaces(user_ns); if (!ucounts) return ERR_PTR(-ENOSPC); net = net_alloc(); if (!net) { rv = -ENOMEM; goto dec_ucounts; } preinit_net(net, user_ns); net->ucounts = ucounts; get_user_ns(user_ns); rv = down_read_killable(&pernet_ops_rwsem); if (rv < 0) goto put_userns; rv = setup_net(net); up_read(&pernet_ops_rwsem); if (rv < 0) { put_userns: #ifdef CONFIG_KEYS key_remove_domain(net->key_domain); #endif put_user_ns(user_ns); net_passive_dec(net); dec_ucounts: dec_net_namespaces(ucounts); return ERR_PTR(rv); } return net; } /** * net_ns_get_ownership - get sysfs ownership data for @net * @net: network namespace in question (can be NULL) * @uid: kernel user ID for sysfs objects * @gid: kernel group ID for sysfs objects * * Returns the uid/gid pair of root in the user namespace associated with the * given network namespace. */ void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid) { if (net) { kuid_t ns_root_uid = make_kuid(net->user_ns, 0); kgid_t ns_root_gid = make_kgid(net->user_ns, 0); if (uid_valid(ns_root_uid)) *uid = ns_root_uid; if (gid_valid(ns_root_gid)) *gid = ns_root_gid; } else { *uid = GLOBAL_ROOT_UID; *gid = GLOBAL_ROOT_GID; } } EXPORT_SYMBOL_GPL(net_ns_get_ownership); static void unhash_nsid(struct net *net, struct net *last) { struct net *tmp; /* This function is only called from cleanup_net() work, * and this work is the only process, that may delete * a net from net_namespace_list. So, when the below * is executing, the list may only grow. Thus, we do not * use for_each_net_rcu() or net_rwsem. */ for_each_net(tmp) { int id; spin_lock(&tmp->nsid_lock); id = __peernet2id(tmp, net); if (id >= 0) idr_remove(&tmp->netns_ids, id); spin_unlock(&tmp->nsid_lock); if (id >= 0) rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL, GFP_KERNEL); if (tmp == last) break; } spin_lock(&net->nsid_lock); idr_destroy(&net->netns_ids); spin_unlock(&net->nsid_lock); } static LLIST_HEAD(cleanup_list); struct task_struct *cleanup_net_task; static void cleanup_net(struct work_struct *work) { struct llist_node *net_kill_list; struct net *net, *tmp, *last; LIST_HEAD(net_exit_list); WRITE_ONCE(cleanup_net_task, current); /* Atomically snapshot the list of namespaces to cleanup */ net_kill_list = llist_del_all(&cleanup_list); down_read(&pernet_ops_rwsem); /* Don't let anyone else find us. */ down_write(&net_rwsem); llist_for_each_entry(net, net_kill_list, cleanup_list) list_del_rcu(&net->list); /* Cache last net. After we unlock rtnl, no one new net * added to net_namespace_list can assign nsid pointer * to a net from net_kill_list (see peernet2id_alloc()). * So, we skip them in unhash_nsid(). * * Note, that unhash_nsid() does not delete nsid links * between net_kill_list's nets, as they've already * deleted from net_namespace_list. But, this would be * useless anyway, as netns_ids are destroyed there. 
*/ last = list_last_entry(&net_namespace_list, struct net, list); up_write(&net_rwsem); llist_for_each_entry(net, net_kill_list, cleanup_list) { unhash_nsid(net, last); list_add_tail(&net->exit_list, &net_exit_list); } ops_undo_list(&pernet_list, NULL, &net_exit_list, true); up_read(&pernet_ops_rwsem); /* Ensure there are no outstanding rcu callbacks using this * network namespace. */ rcu_barrier(); net_complete_free(); /* Finally it is safe to free my network namespace structure */ list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { list_del_init(&net->exit_list); dec_net_namespaces(net->ucounts); #ifdef CONFIG_KEYS key_remove_domain(net->key_domain); #endif put_user_ns(net->user_ns); net_passive_dec(net); } WRITE_ONCE(cleanup_net_task, NULL); } /** * net_ns_barrier - wait until concurrent net_cleanup_work is done * * cleanup_net runs from work queue and will first remove namespaces * from the global list, then run net exit functions. * * Call this in module exit path to make sure that all netns * ->exit ops have been invoked before the function is removed. */ void net_ns_barrier(void) { down_write(&pernet_ops_rwsem); up_write(&pernet_ops_rwsem); } EXPORT_SYMBOL(net_ns_barrier); static DECLARE_WORK(net_cleanup_work, cleanup_net); void __put_net(struct net *net) { ref_tracker_dir_exit(&net->refcnt_tracker); /* Cleanup the network namespace in process context */ if (llist_add(&net->cleanup_list, &cleanup_list)) queue_work(netns_wq, &net_cleanup_work); } EXPORT_SYMBOL_GPL(__put_net); /** * get_net_ns - increment the refcount of the network namespace * @ns: common namespace (net) * * Returns the net's common namespace or ERR_PTR() if ref is zero. */ struct ns_common *get_net_ns(struct ns_common *ns) { struct net *net; net = maybe_get_net(container_of(ns, struct net, ns)); if (net) return &net->ns; return ERR_PTR(-EINVAL); } EXPORT_SYMBOL_GPL(get_net_ns); struct net *get_net_ns_by_fd(int fd) { CLASS(fd, f)(fd); if (fd_empty(f)) return ERR_PTR(-EBADF); if (proc_ns_file(fd_file(f))) { struct ns_common *ns = get_proc_ns(file_inode(fd_file(f))); if (ns->ops == &netns_operations) return get_net(container_of(ns, struct net, ns)); } return ERR_PTR(-EINVAL); } EXPORT_SYMBOL_GPL(get_net_ns_by_fd); #endif struct net *get_net_ns_by_pid(pid_t pid) { struct task_struct *tsk; struct net *net; /* Lookup the network namespace */ net = ERR_PTR(-ESRCH); rcu_read_lock(); tsk = find_task_by_vpid(pid); if (tsk) { struct nsproxy *nsproxy; task_lock(tsk); nsproxy = tsk->nsproxy; if (nsproxy) net = get_net(nsproxy->net_ns); task_unlock(tsk); } rcu_read_unlock(); return net; } EXPORT_SYMBOL_GPL(get_net_ns_by_pid); #ifdef CONFIG_NET_NS_REFCNT_TRACKER static void net_ns_net_debugfs(struct net *net) { ref_tracker_dir_symlink(&net->refcnt_tracker, "netns-%llx-%u-refcnt", net->net_cookie, net->ns.inum); ref_tracker_dir_symlink(&net->notrefcnt_tracker, "netns-%llx-%u-notrefcnt", net->net_cookie, net->ns.inum); } static int __init init_net_debugfs(void) { ref_tracker_dir_debugfs(&init_net.refcnt_tracker); ref_tracker_dir_debugfs(&init_net.notrefcnt_tracker); net_ns_net_debugfs(&init_net); return 0; } late_initcall(init_net_debugfs); #else static void net_ns_net_debugfs(struct net *net) { } #endif static __net_init int net_ns_net_init(struct net *net) { #ifdef CONFIG_NET_NS net->ns.ops = &netns_operations; #endif net->ns.inum = PROC_NET_INIT_INO; if (net != &init_net) { int ret = ns_alloc_inum(&net->ns); if (ret) return ret; } net_ns_net_debugfs(net); return 0; } static __net_exit void net_ns_net_exit(struct net 
*net) { /* * Initial network namespace doesn't exit so we don't need any * special checks here. */ ns_free_inum(&net->ns); } static struct pernet_operations __net_initdata net_ns_ops = { .init = net_ns_net_init, .exit = net_ns_net_exit, }; static const struct nla_policy rtnl_net_policy[NETNSA_MAX + 1] = { [NETNSA_NONE] = { .type = NLA_UNSPEC }, [NETNSA_NSID] = { .type = NLA_S32 }, [NETNSA_PID] = { .type = NLA_U32 }, [NETNSA_FD] = { .type = NLA_U32 }, [NETNSA_TARGET_NSID] = { .type = NLA_S32 }, }; static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[NETNSA_MAX + 1]; struct nlattr *nla; struct net *peer; int nsid, err; err = nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy, extack); if (err < 0) return err; if (!tb[NETNSA_NSID]) { NL_SET_ERR_MSG(extack, "nsid is missing"); return -EINVAL; } nsid = nla_get_s32(tb[NETNSA_NSID]); if (tb[NETNSA_PID]) { peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); nla = tb[NETNSA_PID]; } else if (tb[NETNSA_FD]) { peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); nla = tb[NETNSA_FD]; } else { NL_SET_ERR_MSG(extack, "Peer netns reference is missing"); return -EINVAL; } if (IS_ERR(peer)) { NL_SET_BAD_ATTR(extack, nla); NL_SET_ERR_MSG(extack, "Peer netns reference is invalid"); return PTR_ERR(peer); } spin_lock(&net->nsid_lock); if (__peernet2id(net, peer) >= 0) { spin_unlock(&net->nsid_lock); err = -EEXIST; NL_SET_BAD_ATTR(extack, nla); NL_SET_ERR_MSG(extack, "Peer netns already has a nsid assigned"); goto out; } err = alloc_netid(net, peer, nsid); spin_unlock(&net->nsid_lock); if (err >= 0) { rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid, nlh, GFP_KERNEL); err = 0; } else if (err == -ENOSPC && nsid >= 0) { err = -EEXIST; NL_SET_BAD_ATTR(extack, tb[NETNSA_NSID]); NL_SET_ERR_MSG(extack, "The specified nsid is already used"); } out: put_net(peer); return err; } static int rtnl_net_get_size(void) { return NLMSG_ALIGN(sizeof(struct rtgenmsg)) + nla_total_size(sizeof(s32)) /* NETNSA_NSID */ + nla_total_size(sizeof(s32)) /* NETNSA_CURRENT_NSID */ ; } struct net_fill_args { u32 portid; u32 seq; int flags; int cmd; int nsid; bool add_ref; int ref_nsid; }; static int rtnl_net_fill(struct sk_buff *skb, struct net_fill_args *args) { struct nlmsghdr *nlh; struct rtgenmsg *rth; nlh = nlmsg_put(skb, args->portid, args->seq, args->cmd, sizeof(*rth), args->flags); if (!nlh) return -EMSGSIZE; rth = nlmsg_data(nlh); rth->rtgen_family = AF_UNSPEC; if (nla_put_s32(skb, NETNSA_NSID, args->nsid)) goto nla_put_failure; if (args->add_ref && nla_put_s32(skb, NETNSA_CURRENT_NSID, args->ref_nsid)) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; } static int rtnl_net_valid_getid_req(struct sk_buff *skb, const struct nlmsghdr *nlh, struct nlattr **tb, struct netlink_ext_ack *extack) { int i, err; if (!netlink_strict_get_check(skb)) return nlmsg_parse_deprecated(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy, extack); err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy, extack); if (err) return err; for (i = 0; i <= NETNSA_MAX; i++) { if (!tb[i]) continue; switch (i) { case NETNSA_PID: case NETNSA_FD: case NETNSA_NSID: case NETNSA_TARGET_NSID: break; default: NL_SET_ERR_MSG(extack, "Unsupported attribute in peer netns getid request"); return -EINVAL; } } return 0; } static int rtnl_net_getid(struct 
sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *tb[NETNSA_MAX + 1]; struct net_fill_args fillargs = { .portid = NETLINK_CB(skb).portid, .seq = nlh->nlmsg_seq, .cmd = RTM_NEWNSID, }; struct net *peer, *target = net; struct nlattr *nla; struct sk_buff *msg; int err; err = rtnl_net_valid_getid_req(skb, nlh, tb, extack); if (err < 0) return err; if (tb[NETNSA_PID]) { peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID])); nla = tb[NETNSA_PID]; } else if (tb[NETNSA_FD]) { peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD])); nla = tb[NETNSA_FD]; } else if (tb[NETNSA_NSID]) { peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID])); if (!peer) peer = ERR_PTR(-ENOENT); nla = tb[NETNSA_NSID]; } else { NL_SET_ERR_MSG(extack, "Peer netns reference is missing"); return -EINVAL; } if (IS_ERR(peer)) { NL_SET_BAD_ATTR(extack, nla); NL_SET_ERR_MSG(extack, "Peer netns reference is invalid"); return PTR_ERR(peer); } if (tb[NETNSA_TARGET_NSID]) { int id = nla_get_s32(tb[NETNSA_TARGET_NSID]); target = rtnl_get_net_ns_capable(NETLINK_CB(skb).sk, id); if (IS_ERR(target)) { NL_SET_BAD_ATTR(extack, tb[NETNSA_TARGET_NSID]); NL_SET_ERR_MSG(extack, "Target netns reference is invalid"); err = PTR_ERR(target); goto out; } fillargs.add_ref = true; fillargs.ref_nsid = peernet2id(net, peer); } msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out; } fillargs.nsid = peernet2id(target, peer); err = rtnl_net_fill(msg, &fillargs); if (err < 0) goto err_out; err = rtnl_unicast(msg, net, NETLINK_CB(skb).portid); goto out; err_out: nlmsg_free(msg); out: if (fillargs.add_ref) put_net(target); put_net(peer); return err; } struct rtnl_net_dump_cb { struct net *tgt_net; struct net *ref_net; struct sk_buff *skb; struct net_fill_args fillargs; int idx; int s_idx; }; /* Runs in RCU-critical section. 
*/ static int rtnl_net_dumpid_one(int id, void *peer, void *data) { struct rtnl_net_dump_cb *net_cb = (struct rtnl_net_dump_cb *)data; int ret; if (net_cb->idx < net_cb->s_idx) goto cont; net_cb->fillargs.nsid = id; if (net_cb->fillargs.add_ref) net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer); ret = rtnl_net_fill(net_cb->skb, &net_cb->fillargs); if (ret < 0) return ret; cont: net_cb->idx++; return 0; } static int rtnl_valid_dump_net_req(const struct nlmsghdr *nlh, struct sock *sk, struct rtnl_net_dump_cb *net_cb, struct netlink_callback *cb) { struct netlink_ext_ack *extack = cb->extack; struct nlattr *tb[NETNSA_MAX + 1]; int err, i; err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct rtgenmsg), tb, NETNSA_MAX, rtnl_net_policy, extack); if (err < 0) return err; for (i = 0; i <= NETNSA_MAX; i++) { if (!tb[i]) continue; if (i == NETNSA_TARGET_NSID) { struct net *net; net = rtnl_get_net_ns_capable(sk, nla_get_s32(tb[i])); if (IS_ERR(net)) { NL_SET_BAD_ATTR(extack, tb[i]); NL_SET_ERR_MSG(extack, "Invalid target network namespace id"); return PTR_ERR(net); } net_cb->fillargs.add_ref = true; net_cb->ref_net = net_cb->tgt_net; net_cb->tgt_net = net; } else { NL_SET_BAD_ATTR(extack, tb[i]); NL_SET_ERR_MSG(extack, "Unsupported attribute in dump request"); return -EINVAL; } } return 0; } static int rtnl_net_dumpid(struct sk_buff *skb, struct netlink_callback *cb) { struct rtnl_net_dump_cb net_cb = { .tgt_net = sock_net(skb->sk), .skb = skb, .fillargs = { .portid = NETLINK_CB(cb->skb).portid, .seq = cb->nlh->nlmsg_seq, .flags = NLM_F_MULTI, .cmd = RTM_NEWNSID, }, .idx = 0, .s_idx = cb->args[0], }; int err = 0; if (cb->strict_check) { err = rtnl_valid_dump_net_req(cb->nlh, skb->sk, &net_cb, cb); if (err < 0) goto end; } rcu_read_lock(); idr_for_each(&net_cb.tgt_net->netns_ids, rtnl_net_dumpid_one, &net_cb); rcu_read_unlock(); cb->args[0] = net_cb.idx; end: if (net_cb.fillargs.add_ref) put_net(net_cb.tgt_net); return err; } static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid, struct nlmsghdr *nlh, gfp_t gfp) { struct net_fill_args fillargs = { .portid = portid, .seq = nlh ? 
nlh->nlmsg_seq : 0, .cmd = cmd, .nsid = id, }; struct sk_buff *msg; int err = -ENOMEM; msg = nlmsg_new(rtnl_net_get_size(), gfp); if (!msg) goto out; err = rtnl_net_fill(msg, &fillargs); if (err < 0) goto err_out; rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp); return; err_out: nlmsg_free(msg); out: rtnl_set_sk_err(net, RTNLGRP_NSID, err); } #ifdef CONFIG_NET_NS static void __init netns_ipv4_struct_check(void) { /* TX readonly hotpath cache lines */ CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_early_retrans); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_tso_win_divisor); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_tso_rtt_log); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_autocorking); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_min_snd_mss); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_notsent_lowat); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_limit_output_bytes); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_min_rtt_wlen); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_tcp_wmem); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_tx, sysctl_ip_fwd_use_pmtu); CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_tx, 33); /* TXRX readonly hotpath cache lines */ CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_txrx, sysctl_tcp_moderate_rcvbuf); CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_txrx, 1); /* RX readonly hotpath cache line */ CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, sysctl_ip_early_demux); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, sysctl_tcp_early_demux); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, sysctl_tcp_l3mdev_accept); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, sysctl_tcp_reordering); CACHELINE_ASSERT_GROUP_MEMBER(struct netns_ipv4, netns_ipv4_read_rx, sysctl_tcp_rmem); CACHELINE_ASSERT_GROUP_SIZE(struct netns_ipv4, netns_ipv4_read_rx, 22); } #endif static const struct rtnl_msg_handler net_ns_rtnl_msg_handlers[] __initconst = { {.msgtype = RTM_NEWNSID, .doit = rtnl_net_newid, .flags = RTNL_FLAG_DOIT_UNLOCKED}, {.msgtype = RTM_GETNSID, .doit = rtnl_net_getid, .dumpit = rtnl_net_dumpid, .flags = RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED}, }; void __init net_ns_init(void) { struct net_generic *ng; #ifdef CONFIG_NET_NS netns_ipv4_struct_check(); net_cachep = kmem_cache_create("net_namespace", sizeof(struct net), SMP_CACHE_BYTES, SLAB_PANIC|SLAB_ACCOUNT, NULL); /* Create workqueue for cleanup */ netns_wq = create_singlethread_workqueue("netns"); if (!netns_wq) panic("Could not create netns workq"); #endif ng = net_alloc_generic(); if (!ng) panic("Could not allocate generic netns"); rcu_assign_pointer(init_net.gen, ng); #ifdef CONFIG_KEYS init_net.key_domain = &init_net_key_domain; #endif preinit_net(&init_net, &init_user_ns); down_write(&pernet_ops_rwsem); if (setup_net(&init_net)) panic("Could not setup the initial network namespace"); init_net_initialized = true; up_write(&pernet_ops_rwsem); if (register_pernet_subsys(&net_ns_ops)) panic("Could not register network namespace subsystems"); rtnl_register_many(net_ns_rtnl_msg_handlers); } #ifdef CONFIG_NET_NS static int 
__register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { LIST_HEAD(net_exit_list); struct net *net; int error; list_add_tail(&ops->list, list); if (ops->init || ops->id) { /* We held write locked pernet_ops_rwsem, and parallel * setup_net() and cleanup_net() are not possible. */ for_each_net(net) { error = ops_init(ops, net); if (error) goto out_undo; list_add_tail(&net->exit_list, &net_exit_list); } } return 0; out_undo: /* If I have an error cleanup all namespaces I initialized */ list_del(&ops->list); ops_undo_single(ops, &net_exit_list); return error; } static void __unregister_pernet_operations(struct pernet_operations *ops) { LIST_HEAD(net_exit_list); struct net *net; /* See comment in __register_pernet_operations() */ for_each_net(net) list_add_tail(&net->exit_list, &net_exit_list); list_del(&ops->list); ops_undo_single(ops, &net_exit_list); } #else static int __register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { if (!init_net_initialized) { list_add_tail(&ops->list, list); return 0; } return ops_init(ops, &init_net); } static void __unregister_pernet_operations(struct pernet_operations *ops) { if (!init_net_initialized) { list_del(&ops->list); } else { LIST_HEAD(net_exit_list); list_add(&init_net.exit_list, &net_exit_list); ops_undo_single(ops, &net_exit_list); } } #endif /* CONFIG_NET_NS */ static DEFINE_IDA(net_generic_ids); static int register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { int error; if (WARN_ON(!!ops->id ^ !!ops->size)) return -EINVAL; if (ops->id) { error = ida_alloc_min(&net_generic_ids, MIN_PERNET_OPS_ID, GFP_KERNEL); if (error < 0) return error; *ops->id = error; /* This does not require READ_ONCE as writers already hold * pernet_ops_rwsem. But WRITE_ONCE is needed to protect * net_alloc_generic. */ WRITE_ONCE(max_gen_ptrs, max(max_gen_ptrs, *ops->id + 1)); } error = __register_pernet_operations(list, ops); if (error) { rcu_barrier(); if (ops->id) ida_free(&net_generic_ids, *ops->id); } return error; } static void unregister_pernet_operations(struct pernet_operations *ops) { __unregister_pernet_operations(ops); rcu_barrier(); if (ops->id) ida_free(&net_generic_ids, *ops->id); } /** * register_pernet_subsys - register a network namespace subsystem * @ops: pernet operations structure for the subsystem * * Register a subsystem which has init and exit functions * that are called when network namespaces are created and * destroyed respectively. * * When registered all network namespace init functions are * called for every existing network namespace. Allowing kernel * modules to have a race free view of the set of network namespaces. * * When a new network namespace is created all of the init * methods are called in the order in which they were registered. * * When a network namespace is destroyed all of the exit methods * are called in the reverse of the order with which they were * registered. */ int register_pernet_subsys(struct pernet_operations *ops) { int error; down_write(&pernet_ops_rwsem); error = register_pernet_operations(first_device, ops); up_write(&pernet_ops_rwsem); return error; } EXPORT_SYMBOL_GPL(register_pernet_subsys); /** * unregister_pernet_subsys - unregister a network namespace subsystem * @ops: pernet operations structure to manipulate * * Remove the pernet operations structure from the list to be * used when network namespaces are created or destroyed. In * addition run the exit method for all existing network * namespaces. 
*/ void unregister_pernet_subsys(struct pernet_operations *ops) { down_write(&pernet_ops_rwsem); unregister_pernet_operations(ops); up_write(&pernet_ops_rwsem); } EXPORT_SYMBOL_GPL(unregister_pernet_subsys); /** * register_pernet_device - register a network namespace device * @ops: pernet operations structure for the subsystem * * Register a device which has init and exit functions * that are called when network namespaces are created and * destroyed respectively. * * When registered all network namespace init functions are * called for every existing network namespace. Allowing kernel * modules to have a race free view of the set of network namespaces. * * When a new network namespace is created all of the init * methods are called in the order in which they were registered. * * When a network namespace is destroyed all of the exit methods * are called in the reverse of the order with which they were * registered. */ int register_pernet_device(struct pernet_operations *ops) { int error; down_write(&pernet_ops_rwsem); error = register_pernet_operations(&pernet_list, ops); if (!error && (first_device == &pernet_list)) first_device = &ops->list; up_write(&pernet_ops_rwsem); return error; } EXPORT_SYMBOL_GPL(register_pernet_device); /** * unregister_pernet_device - unregister a network namespace netdevice * @ops: pernet operations structure to manipulate * * Remove the pernet operations structure from the list to be * used when network namespaces are created or destroyed. In * addition run the exit method for all existing network * namespaces. */ void unregister_pernet_device(struct pernet_operations *ops) { down_write(&pernet_ops_rwsem); if (&ops->list == first_device) first_device = first_device->next; unregister_pernet_operations(ops); up_write(&pernet_ops_rwsem); } EXPORT_SYMBOL_GPL(unregister_pernet_device); #ifdef CONFIG_NET_NS static struct ns_common *netns_get(struct task_struct *task) { struct net *net = NULL; struct nsproxy *nsproxy; task_lock(task); nsproxy = task->nsproxy; if (nsproxy) net = get_net(nsproxy->net_ns); task_unlock(task); return net ? &net->ns : NULL; } static inline struct net *to_net_ns(struct ns_common *ns) { return container_of(ns, struct net, ns); } static void netns_put(struct ns_common *ns) { put_net(to_net_ns(ns)); } static int netns_install(struct nsset *nsset, struct ns_common *ns) { struct nsproxy *nsproxy = nsset->nsproxy; struct net *net = to_net_ns(ns); if (!ns_capable(net->user_ns, CAP_SYS_ADMIN) || !ns_capable(nsset->cred->user_ns, CAP_SYS_ADMIN)) return -EPERM; put_net(nsproxy->net_ns); nsproxy->net_ns = get_net(net); return 0; } static struct user_namespace *netns_owner(struct ns_common *ns) { return to_net_ns(ns)->user_ns; } const struct proc_ns_operations netns_operations = { .name = "net", .type = CLONE_NEWNET, .get = netns_get, .put = netns_put, .install = netns_install, .owner = netns_owner, }; #endif |
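/*
 * Illustrative sketch (not part of the kernel tree): how a subsystem would
 * typically consume the pernet_operations interface documented above. The
 * names struct example_net, example_net_id and example_net_ops are
 * hypothetical. Setting .id/.size makes ops_init() allocate a zeroed
 * per-namespace slot that the callbacks retrieve with net_generic().
 */
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

struct example_net {
	int counter;			/* private per-namespace state */
};

static unsigned int example_net_id __read_mostly;

static int __net_init example_net_init(struct net *net)
{
	/* Slot allocated by ops_init() because .id/.size are set below. */
	struct example_net *en = net_generic(net, example_net_id);

	en->counter = 0;
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* Undo whatever example_net_init() set up for this namespace. */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};

static int __init example_module_init(void)
{
	/* Runs .init for every existing netns and for all created later. */
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_module_exit(void)
{
	/* Runs .exit for every live netns before the ops are removed. */
	unregister_pernet_subsys(&example_net_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");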
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_FPU_XCR_H #define _ASM_X86_FPU_XCR_H #define XCR_XFEATURE_ENABLED_MASK 0x00000000 #define XCR_XFEATURE_IN_USE_MASK 0x00000001 static __always_inline u64 xgetbv(u32 index) { u32 eax, edx; asm volatile("xgetbv" : "=a" (eax), "=d" (edx) : "c" (index)); return eax + ((u64)edx << 32); } static inline void xsetbv(u32 index, u64 value) { u32 eax = value; u32 edx = value >> 32; asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index)); } /* * Return a mask of xfeatures which are currently being tracked * by the processor as being in the initial configuration. * * Callers should check X86_FEATURE_XGETBV1. */ static __always_inline u64 xfeatures_in_use(void) { return xgetbv(XCR_XFEATURE_IN_USE_MASK); } #endif /* _ASM_X86_FPU_XCR_H */
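/*
 * Illustrative sketch (not part of the kernel tree): a hypothetical caller of
 * the helpers above. example_avx_state_enabled() and EXAMPLE_XFEATURE_MASK_YMM
 * are made-up names; bit 2 of XCR0 is the architectural AVX/YMM state
 * component. A real caller would also need OSXSAVE to be enabled, and would
 * check X86_FEATURE_XGETBV1 before relying on xfeatures_in_use().
 */
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_XFEATURE_MASK_YMM	BIT_ULL(2)	/* AVX/YMM state bit in XCR0 */

static __always_inline bool example_avx_state_enabled(void)
{
	/* XCR0 holds the xfeature set the OS has enabled via xsetbv(). */
	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	return (xcr0 & EXAMPLE_XFEATURE_MASK_YMM) != 0;
}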
// SPDX-License-Identifier: GPL-2.0-only #include <linux/netdevice.h> #include <linux/notifier.h> #include <linux/rtnetlink.h> #include <net/busy_poll.h> #include <net/net_namespace.h> #include <net/netdev_queues.h> #include <net/netdev_rx_queue.h> #include <net/sock.h> #include <net/xdp.h> #include <net/xdp_sock.h> #include <net/page_pool/memory_provider.h> #include "dev.h" #include "devmem.h" #include "netdev-genl-gen.h" struct netdev_nl_dump_ctx { unsigned long ifindex; unsigned int rxq_idx; unsigned int txq_idx; unsigned int napi_id; }; static struct netdev_nl_dump_ctx *netdev_dump_ctx(struct netlink_callback *cb) { NL_ASSERT_CTX_FITS(struct netdev_nl_dump_ctx); return (struct netdev_nl_dump_ctx *)cb->ctx; } static int netdev_nl_dev_fill(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { u64 xsk_features = 0; u64 xdp_rx_meta = 0; void *hdr; netdev_assert_locked(netdev); /* note: rtnl_lock may not be held!
*/ hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; #define XDP_METADATA_KFUNC(_, flag, __, xmo) \ if (netdev->xdp_metadata_ops && netdev->xdp_metadata_ops->xmo) \ xdp_rx_meta |= flag; XDP_METADATA_KFUNC_xxx #undef XDP_METADATA_KFUNC if (netdev->xsk_tx_metadata_ops) { if (netdev->xsk_tx_metadata_ops->tmo_fill_timestamp) xsk_features |= NETDEV_XSK_FLAGS_TX_TIMESTAMP; if (netdev->xsk_tx_metadata_ops->tmo_request_checksum) xsk_features |= NETDEV_XSK_FLAGS_TX_CHECKSUM; if (netdev->xsk_tx_metadata_ops->tmo_request_launch_time) xsk_features |= NETDEV_XSK_FLAGS_TX_LAUNCH_TIME_FIFO; } if (nla_put_u32(rsp, NETDEV_A_DEV_IFINDEX, netdev->ifindex) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_FEATURES, netdev->xdp_features, NETDEV_A_DEV_PAD) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XDP_RX_METADATA_FEATURES, xdp_rx_meta, NETDEV_A_DEV_PAD) || nla_put_u64_64bit(rsp, NETDEV_A_DEV_XSK_FEATURES, xsk_features, NETDEV_A_DEV_PAD)) goto err_cancel_msg; if (netdev->xdp_features & NETDEV_XDP_ACT_XSK_ZEROCOPY) { if (nla_put_u32(rsp, NETDEV_A_DEV_XDP_ZC_MAX_SEGS, netdev->xdp_zc_max_segs)) goto err_cancel_msg; } genlmsg_end(rsp, hdr); return 0; err_cancel_msg: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static void netdev_genl_dev_notify(struct net_device *netdev, int cmd) { struct genl_info info; struct sk_buff *ntf; if (!genl_has_listeners(&netdev_nl_family, dev_net(netdev), NETDEV_NLGRP_MGMT)) return; genl_info_init_ntf(&info, &netdev_nl_family, cmd); ntf = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!ntf) return; if (netdev_nl_dev_fill(netdev, ntf, &info)) { nlmsg_free(ntf); return; } genlmsg_multicast_netns(&netdev_nl_family, dev_net(netdev), ntf, 0, NETDEV_NLGRP_MGMT, GFP_KERNEL); } int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info) { struct net_device *netdev; struct sk_buff *rsp; u32 ifindex; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev) { err = -ENODEV; goto err_free_msg; } err = netdev_nl_dev_fill(netdev, rsp, info); netdev_unlock(netdev); if (err) goto err_free_msg; return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); struct net *net = sock_net(skb->sk); int err; for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_dev_fill(netdev, skb, genl_info_dump(cb)); if (err < 0) return err; } return 0; } static int netdev_nl_napi_fill_one(struct sk_buff *rsp, struct napi_struct *napi, const struct genl_info *info) { unsigned long irq_suspend_timeout; unsigned long gro_flush_timeout; u32 napi_defer_hard_irqs; void *hdr; pid_t pid; if (!napi->dev->up) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_NAPI_ID, napi->napi_id)) goto nla_put_failure; if (nla_put_u32(rsp, NETDEV_A_NAPI_IFINDEX, napi->dev->ifindex)) goto nla_put_failure; if (napi->irq >= 0 && nla_put_u32(rsp, NETDEV_A_NAPI_IRQ, napi->irq)) goto nla_put_failure; if (nla_put_uint(rsp, NETDEV_A_NAPI_THREADED, napi_get_threaded(napi))) goto nla_put_failure; if (napi->thread) { pid = task_pid_nr(napi->thread); if (nla_put_u32(rsp, NETDEV_A_NAPI_PID, pid)) goto nla_put_failure; } napi_defer_hard_irqs = napi_get_defer_hard_irqs(napi); if (nla_put_s32(rsp, 
NETDEV_A_NAPI_DEFER_HARD_IRQS, napi_defer_hard_irqs)) goto nla_put_failure; irq_suspend_timeout = napi_get_irq_suspend_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, irq_suspend_timeout)) goto nla_put_failure; gro_flush_timeout = napi_get_gro_flush_timeout(napi); if (nla_put_uint(rsp, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, gro_flush_timeout)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; struct sk_buff *rsp; u32 napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_fill_one(rsp, napi, info); netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } if (err) { goto err_free_msg; } else if (!rsp->len) { err = -ENOENT; goto err_free_msg; } return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_napi_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { struct napi_struct *napi; unsigned int prev_id; int err = 0; if (!netdev->up) return err; prev_id = UINT_MAX; list_for_each_entry(napi, &netdev->napi_list, dev_list) { if (!napi_id_valid(napi->napi_id)) continue; /* Dump continuation below depends on the list being sorted */ WARN_ON_ONCE(napi->napi_id >= prev_id); prev_id = napi->napi_id; if (ctx->napi_id && napi->napi_id >= ctx->napi_id) continue; err = netdev_nl_napi_fill_one(rsp, napi, info); if (err) return err; ctx->napi_id = napi->napi_id; } return err; } int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_NAPI_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_NAPI_IFINDEX]); if (ifindex) { netdev = netdev_get_by_index_lock(net, ifindex); if (netdev) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); netdev_unlock(netdev); } else { err = -ENODEV; } } else { for_each_netdev_lock_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_napi_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->napi_id = 0; } } return err; } static int netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info) { u64 irq_suspend_timeout = 0; u64 gro_flush_timeout = 0; u8 threaded = 0; u32 defer = 0; if (info->attrs[NETDEV_A_NAPI_THREADED]) { int ret; threaded = nla_get_uint(info->attrs[NETDEV_A_NAPI_THREADED]); ret = napi_set_threaded(napi, threaded); if (ret) return ret; } if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) { defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]); napi_set_defer_hard_irqs(napi, defer); } if (info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]) { irq_suspend_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT]); napi_set_irq_suspend_timeout(napi, irq_suspend_timeout); } if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) { gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]); napi_set_gro_flush_timeout(napi, gro_flush_timeout); } return 0; } int 
netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info) { struct napi_struct *napi; unsigned int napi_id; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID)) return -EINVAL; napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]); napi = netdev_napi_by_id_lock(genl_info_net(info), napi_id); if (napi) { err = netdev_nl_napi_set_config(napi, info); netdev_unlock(napi->dev); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]); err = -ENOENT; } return err; } static int nla_put_napi_id(struct sk_buff *skb, const struct napi_struct *napi) { if (napi && napi_id_valid(napi->napi_id)) return nla_put_u32(skb, NETDEV_A_QUEUE_NAPI_ID, napi->napi_id); return 0; } static int netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { struct pp_memory_provider_params *params; struct netdev_rx_queue *rxq; struct netdev_queue *txq; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QUEUE_ID, q_idx) || nla_put_u32(rsp, NETDEV_A_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QUEUE_IFINDEX, netdev->ifindex)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: rxq = __netif_get_rx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, rxq->napi)) goto nla_put_failure; params = &rxq->mp_params; if (params->mp_ops && params->mp_ops->nl_fill(params->mp_priv, rsp, rxq)) goto nla_put_failure; #ifdef CONFIG_XDP_SOCKETS if (rxq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) goto nla_put_failure; #endif break; case NETDEV_QUEUE_TYPE_TX: txq = netdev_get_tx_queue(netdev, q_idx); if (nla_put_napi_id(rsp, txq->napi)) goto nla_put_failure; #ifdef CONFIG_XDP_SOCKETS if (txq->pool) if (nla_put_empty_nest(rsp, NETDEV_A_QUEUE_XSK)) goto nla_put_failure; #endif break; } genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id, u32 q_type) { switch (q_type) { case NETDEV_QUEUE_TYPE_RX: if (q_id >= netdev->real_num_rx_queues) return -EINVAL; return 0; case NETDEV_QUEUE_TYPE_TX: if (q_id >= netdev->real_num_tx_queues) return -EINVAL; } return 0; } static int netdev_nl_queue_fill(struct sk_buff *rsp, struct net_device *netdev, u32 q_idx, u32 q_type, const struct genl_info *info) { int err; if (!netdev->up) return -ENOENT; err = netdev_nl_queue_validate(netdev, q_idx, q_type); if (err) return err; return netdev_nl_queue_fill_one(rsp, netdev, q_idx, q_type, info); } int netdev_nl_queue_get_doit(struct sk_buff *skb, struct genl_info *info) { u32 q_id, q_type, ifindex; struct net_device *netdev; struct sk_buff *rsp; int err; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_ID) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_TYPE) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_QUEUE_IFINDEX)) return -EINVAL; q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]); q_type = nla_get_u32(info->attrs[NETDEV_A_QUEUE_TYPE]); ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; netdev = netdev_get_by_index_lock_ops_compat(genl_info_net(info), ifindex); if (netdev) { err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info); netdev_unlock_ops_compat(netdev); } else { err = -ENODEV; } if (err) goto err_free_msg; return genlmsg_reply(rsp, info); err_free_msg: nlmsg_free(rsp); return err; } static int netdev_nl_queue_dump_one(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, 
struct netdev_nl_dump_ctx *ctx) { int err = 0; if (!netdev->up) return err; for (; ctx->rxq_idx < netdev->real_num_rx_queues; ctx->rxq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->rxq_idx, NETDEV_QUEUE_TYPE_RX, info); if (err) return err; } for (; ctx->txq_idx < netdev->real_num_tx_queues; ctx->txq_idx++) { err = netdev_nl_queue_fill_one(rsp, netdev, ctx->txq_idx, NETDEV_QUEUE_TYPE_TX, info); if (err) return err; } return err; } int netdev_nl_queue_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; u32 ifindex = 0; int err = 0; if (info->attrs[NETDEV_A_QUEUE_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QUEUE_IFINDEX]); if (ifindex) { netdev = netdev_get_by_index_lock_ops_compat(net, ifindex); if (netdev) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); netdev_unlock_ops_compat(netdev); } else { err = -ENODEV; } } else { for_each_netdev_lock_ops_compat_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_queue_dump_one(netdev, skb, info, ctx); if (err < 0) break; ctx->rxq_idx = 0; ctx->txq_idx = 0; } } return err; } #define NETDEV_STAT_NOT_SET (~0ULL) static void netdev_nl_stats_add(void *_sum, const void *_add, size_t size) { const u64 *add = _add; u64 *sum = _sum; while (size) { if (*add != NETDEV_STAT_NOT_SET && *sum != NETDEV_STAT_NOT_SET) *sum += *add; sum++; add++; size -= 8; } } static int netdev_stat_put(struct sk_buff *rsp, unsigned int attr_id, u64 value) { if (value == NETDEV_STAT_NOT_SET) return 0; return nla_put_uint(rsp, attr_id, value); } static int netdev_nl_stats_write_rx(struct sk_buff *rsp, struct netdev_queue_stats_rx *rx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_PACKETS, rx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_BYTES, rx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_ALLOC_FAIL, rx->alloc_fail) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROPS, rx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_OVERRUNS, rx->hw_drop_overruns) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_COMPLETE, rx->csum_complete) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_UNNECESSARY, rx->csum_unnecessary) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_NONE, rx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_CSUM_BAD, rx->csum_bad) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_PACKETS, rx->hw_gro_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_BYTES, rx->hw_gro_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_PACKETS, rx->hw_gro_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_GRO_WIRE_BYTES, rx->hw_gro_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_RX_HW_DROP_RATELIMITS, rx->hw_drop_ratelimits)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_write_tx(struct sk_buff *rsp, struct netdev_queue_stats_tx *tx) { if (netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_PACKETS, tx->packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_BYTES, tx->bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROPS, tx->hw_drops) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_ERRORS, tx->hw_drop_errors) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_CSUM_NONE, tx->csum_none) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_NEEDS_CSUM, tx->needs_csum) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_PACKETS, tx->hw_gso_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_BYTES, tx->hw_gso_bytes) || netdev_stat_put(rsp, 
NETDEV_A_QSTATS_TX_HW_GSO_WIRE_PACKETS, tx->hw_gso_wire_packets) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_GSO_WIRE_BYTES, tx->hw_gso_wire_bytes) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_HW_DROP_RATELIMITS, tx->hw_drop_ratelimits) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_STOP, tx->stop) || netdev_stat_put(rsp, NETDEV_A_QSTATS_TX_WAKE, tx->wake)) return -EMSGSIZE; return 0; } static int netdev_nl_stats_queue(struct net_device *netdev, struct sk_buff *rsp, u32 q_type, int i, const struct genl_info *info) { const struct netdev_stat_ops *ops = netdev->stat_ops; struct netdev_queue_stats_rx rx; struct netdev_queue_stats_tx tx; void *hdr; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_TYPE, q_type) || nla_put_u32(rsp, NETDEV_A_QSTATS_QUEUE_ID, i)) goto nla_put_failure; switch (q_type) { case NETDEV_QUEUE_TYPE_RX: memset(&rx, 0xff, sizeof(rx)); ops->get_queue_stats_rx(netdev, i, &rx); if (!memchr_inv(&rx, 0xff, sizeof(rx))) goto nla_cancel; if (netdev_nl_stats_write_rx(rsp, &rx)) goto nla_put_failure; break; case NETDEV_QUEUE_TYPE_TX: memset(&tx, 0xff, sizeof(tx)); ops->get_queue_stats_tx(netdev, i, &tx); if (!memchr_inv(&tx, 0xff, sizeof(tx))) goto nla_cancel; if (netdev_nl_stats_write_tx(rsp, &tx)) goto nla_put_failure; break; } genlmsg_end(rsp, hdr); return 0; nla_cancel: genlmsg_cancel(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { const struct netdev_stat_ops *ops = netdev->stat_ops; int i, err; if (!(netdev->flags & IFF_UP)) return 0; i = ctx->rxq_idx; while (ops->get_queue_stats_rx && i < netdev->real_num_rx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_RX, i, info); if (err) return err; ctx->rxq_idx = ++i; } i = ctx->txq_idx; while (ops->get_queue_stats_tx && i < netdev->real_num_tx_queues) { err = netdev_nl_stats_queue(netdev, rsp, NETDEV_QUEUE_TYPE_TX, i, info); if (err) return err; ctx->txq_idx = ++i; } ctx->rxq_idx = 0; ctx->txq_idx = 0; return 0; } /** * netdev_stat_queue_sum() - add up queue stats from range of queues * @netdev: net_device * @rx_start: index of the first Rx queue to query * @rx_end: index after the last Rx queue (first *not* to query) * @rx_sum: output Rx stats, should be already initialized * @tx_start: index of the first Tx queue to query * @tx_end: index after the last Tx queue (first *not* to query) * @tx_sum: output Tx stats, should be already initialized * * Add stats from [start, end) range of queue IDs to *x_sum structs. * The sum structs must be already initialized. Usually this * helper is invoked from the .get_base_stats callbacks of drivers * to account for stats of disabled queues. In that case the ranges * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues). 
*/ void netdev_stat_queue_sum(struct net_device *netdev, int rx_start, int rx_end, struct netdev_queue_stats_rx *rx_sum, int tx_start, int tx_end, struct netdev_queue_stats_tx *tx_sum) { const struct netdev_stat_ops *ops; struct netdev_queue_stats_rx rx; struct netdev_queue_stats_tx tx; int i; ops = netdev->stat_ops; for (i = rx_start; i < rx_end; i++) { memset(&rx, 0xff, sizeof(rx)); if (ops->get_queue_stats_rx) ops->get_queue_stats_rx(netdev, i, &rx); netdev_nl_stats_add(rx_sum, &rx, sizeof(rx)); } for (i = tx_start; i < tx_end; i++) { memset(&tx, 0xff, sizeof(tx)); if (ops->get_queue_stats_tx) ops->get_queue_stats_tx(netdev, i, &tx); netdev_nl_stats_add(tx_sum, &tx, sizeof(tx)); } } EXPORT_SYMBOL(netdev_stat_queue_sum); static int netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp, const struct genl_info *info) { struct netdev_queue_stats_rx rx_sum; struct netdev_queue_stats_tx tx_sum; void *hdr; /* Netdev can't guarantee any complete counters */ if (!netdev->stat_ops->get_base_stats) return 0; memset(&rx_sum, 0xff, sizeof(rx_sum)); memset(&tx_sum, 0xff, sizeof(tx_sum)); netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum); /* The op was there, but nothing reported, don't bother */ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) && !memchr_inv(&tx_sum, 0xff, sizeof(tx_sum))) return 0; hdr = genlmsg_iput(rsp, info); if (!hdr) return -EMSGSIZE; if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex)) goto nla_put_failure; netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum, 0, netdev->real_num_tx_queues, &tx_sum); if (netdev_nl_stats_write_rx(rsp, &rx_sum) || netdev_nl_stats_write_tx(rsp, &tx_sum)) goto nla_put_failure; genlmsg_end(rsp, hdr); return 0; nla_put_failure: genlmsg_cancel(rsp, hdr); return -EMSGSIZE; } static int netdev_nl_qstats_get_dump_one(struct net_device *netdev, unsigned int scope, struct sk_buff *skb, const struct genl_info *info, struct netdev_nl_dump_ctx *ctx) { if (!netdev->stat_ops) return 0; switch (scope) { case 0: return netdev_nl_stats_by_netdev(netdev, skb, info); case NETDEV_QSTATS_SCOPE_QUEUE: return netdev_nl_stats_by_queue(netdev, skb, info, ctx); } return -EINVAL; /* Should not happen, per netlink policy */ } int netdev_nl_qstats_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { struct netdev_nl_dump_ctx *ctx = netdev_dump_ctx(cb); const struct genl_info *info = genl_info_dump(cb); struct net *net = sock_net(skb->sk); struct net_device *netdev; unsigned int ifindex; unsigned int scope; int err = 0; scope = 0; if (info->attrs[NETDEV_A_QSTATS_SCOPE]) scope = nla_get_uint(info->attrs[NETDEV_A_QSTATS_SCOPE]); ifindex = 0; if (info->attrs[NETDEV_A_QSTATS_IFINDEX]) ifindex = nla_get_u32(info->attrs[NETDEV_A_QSTATS_IFINDEX]); if (ifindex) { netdev = netdev_get_by_index_lock_ops_compat(net, ifindex); if (!netdev) { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QSTATS_IFINDEX]); return -ENODEV; } if (netdev->stat_ops) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); } else { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_QSTATS_IFINDEX]); err = -EOPNOTSUPP; } netdev_unlock_ops_compat(netdev); return err; } for_each_netdev_lock_ops_compat_scoped(net, netdev, ctx->ifindex) { err = netdev_nl_qstats_get_dump_one(netdev, scope, skb, info, ctx); if (err < 0) break; } return err; } int netdev_nl_bind_rx_doit(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[ARRAY_SIZE(netdev_queue_id_nl_policy)]; struct net_devmem_dmabuf_binding *binding; u32 ifindex, dmabuf_fd, 
rxq_idx; struct netdev_nl_sock *priv; struct net_device *netdev; struct sk_buff *rsp; struct nlattr *attr; int rem, err = 0; void *hdr; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_QUEUES)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]); priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk); if (IS_ERR(priv)) return PTR_ERR(priv); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; hdr = genlmsg_iput(rsp, info); if (!hdr) { err = -EMSGSIZE; goto err_genlmsg_free; } mutex_lock(&priv->lock); err = 0; netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev) { err = -ENODEV; goto err_unlock_sock; } if (!netif_device_present(netdev)) err = -ENODEV; else if (!netdev_need_ops_lock(netdev)) err = -EOPNOTSUPP; if (err) { NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_DEV_IFINDEX]); goto err_unlock; } binding = net_devmem_bind_dmabuf(netdev, DMA_FROM_DEVICE, dmabuf_fd, priv, info->extack); if (IS_ERR(binding)) { err = PTR_ERR(binding); goto err_unlock; } nla_for_each_attr_type(attr, NETDEV_A_DMABUF_QUEUES, genlmsg_data(info->genlhdr), genlmsg_len(info->genlhdr), rem) { err = nla_parse_nested( tb, ARRAY_SIZE(netdev_queue_id_nl_policy) - 1, attr, netdev_queue_id_nl_policy, info->extack); if (err < 0) goto err_unbind; if (NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_ID) || NL_REQ_ATTR_CHECK(info->extack, attr, tb, NETDEV_A_QUEUE_TYPE)) { err = -EINVAL; goto err_unbind; } if (nla_get_u32(tb[NETDEV_A_QUEUE_TYPE]) != NETDEV_QUEUE_TYPE_RX) { NL_SET_BAD_ATTR(info->extack, tb[NETDEV_A_QUEUE_TYPE]); err = -EINVAL; goto err_unbind; } rxq_idx = nla_get_u32(tb[NETDEV_A_QUEUE_ID]); err = net_devmem_bind_dmabuf_to_queue(netdev, rxq_idx, binding, info->extack); if (err) goto err_unbind; } nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); genlmsg_end(rsp, hdr); err = genlmsg_reply(rsp, info); if (err) goto err_unbind; netdev_unlock(netdev); mutex_unlock(&priv->lock); return 0; err_unbind: net_devmem_unbind_dmabuf(binding); err_unlock: netdev_unlock(netdev); err_unlock_sock: mutex_unlock(&priv->lock); err_genlmsg_free: nlmsg_free(rsp); return err; } int netdev_nl_bind_tx_doit(struct sk_buff *skb, struct genl_info *info) { struct net_devmem_dmabuf_binding *binding; struct netdev_nl_sock *priv; struct net_device *netdev; u32 ifindex, dmabuf_fd; struct sk_buff *rsp; int err = 0; void *hdr; if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_DEV_IFINDEX) || GENL_REQ_ATTR_CHECK(info, NETDEV_A_DMABUF_FD)) return -EINVAL; ifindex = nla_get_u32(info->attrs[NETDEV_A_DEV_IFINDEX]); dmabuf_fd = nla_get_u32(info->attrs[NETDEV_A_DMABUF_FD]); priv = genl_sk_priv_get(&netdev_nl_family, NETLINK_CB(skb).sk); if (IS_ERR(priv)) return PTR_ERR(priv); rsp = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!rsp) return -ENOMEM; hdr = genlmsg_iput(rsp, info); if (!hdr) { err = -EMSGSIZE; goto err_genlmsg_free; } mutex_lock(&priv->lock); netdev = netdev_get_by_index_lock(genl_info_net(info), ifindex); if (!netdev) { err = -ENODEV; goto err_unlock_sock; } if (!netif_device_present(netdev)) { err = -ENODEV; goto err_unlock_netdev; } if (!netdev->netmem_tx) { err = -EOPNOTSUPP; NL_SET_ERR_MSG(info->extack, "Driver does not support netmem TX"); goto err_unlock_netdev; } binding = net_devmem_bind_dmabuf(netdev, DMA_TO_DEVICE, dmabuf_fd, priv, info->extack); if (IS_ERR(binding)) { err = 
PTR_ERR(binding); goto err_unlock_netdev; } nla_put_u32(rsp, NETDEV_A_DMABUF_ID, binding->id); genlmsg_end(rsp, hdr); netdev_unlock(netdev); mutex_unlock(&priv->lock); return genlmsg_reply(rsp, info); err_unlock_netdev: netdev_unlock(netdev); err_unlock_sock: mutex_unlock(&priv->lock); err_genlmsg_free: nlmsg_free(rsp); return err; } void netdev_nl_sock_priv_init(struct netdev_nl_sock *priv) { INIT_LIST_HEAD(&priv->bindings); mutex_init(&priv->lock); } void netdev_nl_sock_priv_destroy(struct netdev_nl_sock *priv) { struct net_devmem_dmabuf_binding *binding; struct net_devmem_dmabuf_binding *temp; netdevice_tracker dev_tracker; struct net_device *dev; mutex_lock(&priv->lock); list_for_each_entry_safe(binding, temp, &priv->bindings, list) { mutex_lock(&binding->lock); dev = binding->dev; if (!dev) { mutex_unlock(&binding->lock); net_devmem_unbind_dmabuf(binding); continue; } netdev_hold(dev, &dev_tracker, GFP_KERNEL); mutex_unlock(&binding->lock); netdev_lock(dev); net_devmem_unbind_dmabuf(binding); netdev_unlock(dev); netdev_put(dev, &dev_tracker); } mutex_unlock(&priv->lock); } static int netdev_genl_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); switch (event) { case NETDEV_REGISTER: netdev_lock_ops_to_full(netdev); netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_ADD_NTF); netdev_unlock_full_to_ops(netdev); break; case NETDEV_UNREGISTER: netdev_lock(netdev); netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_DEL_NTF); netdev_unlock(netdev); break; case NETDEV_XDP_FEAT_CHANGE: netdev_genl_dev_notify(netdev, NETDEV_CMD_DEV_CHANGE_NTF); break; } return NOTIFY_OK; } static struct notifier_block netdev_genl_nb = { .notifier_call = netdev_genl_netdevice_event, }; static int __init netdev_genl_init(void) { int err; err = register_netdevice_notifier(&netdev_genl_nb); if (err) return err; err = genl_register_family(&netdev_nl_family); if (err) goto err_unreg_ntf; return 0; err_unreg_ntf: unregister_netdevice_notifier(&netdev_genl_nb); return err; } subsys_initcall(netdev_genl_init); |
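/*
 * Illustrative sketch (not from any in-tree driver): the .get_base_stats
 * pattern described in the netdev_stat_queue_sum() kernel-doc above.
 * example_get_base_stats() and example_stat_ops are hypothetical names. A
 * real driver would typically initialize only the counters it maintains and
 * leave the rest at NETDEV_STAT_NOT_SET.
 */
#include <linux/netdevice.h>
#include <linux/string.h>
#include <net/netdev_queues.h>

static void example_get_base_stats(struct net_device *netdev,
				   struct netdev_queue_stats_rx *rx,
				   struct netdev_queue_stats_tx *tx)
{
	/* Claim every counter as reported, starting from zero. */
	memset(rx, 0, sizeof(*rx));
	memset(tx, 0, sizeof(*tx));

	/* Device-wide counters kept outside the per-queue stats go here. */

	/* Fold in queues that exist but are not currently enabled. */
	netdev_stat_queue_sum(netdev,
			      netdev->real_num_rx_queues,
			      netdev->num_rx_queues, rx,
			      netdev->real_num_tx_queues,
			      netdev->num_tx_queues, tx);
}

static const struct netdev_stat_ops example_stat_ops = {
	/* Per-queue callbacks omitted for brevity. */
	.get_base_stats		= example_get_base_stats,
};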
4227 4228 4229 4230 4231 4232 4233 4234 4235 4236 4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255 4256 4257 4258 4259 4260 | // SPDX-License-Identifier: GPL-2.0-only /* xfrm_user.c: User interface to configure xfrm engine. * * Copyright (C) 2002 David S. Miller (davem@redhat.com) * * Changes: * Mitsuru KANDA @USAGI * Kazunori MIYAZAWA @USAGI * Kunihiro Ishiguro <kunihiro@ipinfusion.com> * IPv6 support * */ #include <linux/compat.h> #include <linux/crypto.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/string.h> #include <linux/net.h> #include <linux/skbuff.h> #include <linux/pfkeyv2.h> #include <linux/ipsec.h> #include <linux/init.h> #include <linux/security.h> #include <net/sock.h> #include <net/xfrm.h> #include <net/netlink.h> #include <net/ah.h> #include <linux/uaccess.h> #if IS_ENABLED(CONFIG_IPV6) #include <linux/in6.h> #endif #include <linux/unaligned.h> static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type, struct netlink_ext_ack *extack) { struct nlattr *rt = attrs[type]; struct xfrm_algo *algp; if (!rt) return 0; algp = nla_data(rt); if (nla_len(rt) < (int)xfrm_alg_len(algp)) { NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length"); return -EINVAL; } switch (type) { case XFRMA_ALG_AUTH: case XFRMA_ALG_CRYPT: case XFRMA_ALG_COMP: break; default: NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type"); return -EINVAL; } algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; return 0; } static int verify_auth_trunc(struct nlattr **attrs, struct netlink_ext_ack *extack) { struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC]; struct xfrm_algo_auth *algp; if (!rt) return 0; algp = nla_data(rt); if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) { NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length"); return -EINVAL; } algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; return 0; } static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack) { struct nlattr *rt = attrs[XFRMA_ALG_AEAD]; struct xfrm_algo_aead *algp; if (!rt) return 0; algp = nla_data(rt); if (nla_len(rt) < (int)aead_len(algp)) { NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length"); return -EINVAL; } algp->alg_name[sizeof(algp->alg_name) - 1] = '\0'; return 0; } static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type, xfrm_address_t **addrp) { struct nlattr *rt = attrs[type]; if (rt && addrp) *addrp = nla_data(rt); } static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack) { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_user_sec_ctx *uctx; if (!rt) return 0; uctx = nla_data(rt); if (uctx->len > nla_len(rt) || uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) { NL_SET_ERR_MSG(extack, "Invalid security context length"); return -EINVAL; } return 0; } static inline int verify_replay(struct xfrm_usersa_info *p, struct nlattr **attrs, u8 sa_dir, struct netlink_ext_ack *extack) { struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; struct xfrm_replay_state_esn *rs; if (!rt) { if (p->flags & XFRM_STATE_ESN) { NL_SET_ERR_MSG(extack, "Missing required attribute for ESN"); return -EINVAL; } return 0; } rs = nla_data(rt); if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) { NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128"); return -EINVAL; } if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) && nla_len(rt) != sizeof(*rs)) { 
NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length"); return -EINVAL; } /* As only ESP and AH support ESN feature. */ if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) { NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH"); return -EINVAL; } if (p->replay_window != 0) { NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window"); return -EINVAL; } if (sa_dir == XFRM_SA_DIR_OUT) { if (rs->replay_window) { NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA"); return -EINVAL; } if (rs->seq || rs->seq_hi) { NL_SET_ERR_MSG(extack, "Replay seq and seq_hi should be 0 for output SA"); return -EINVAL; } if (!(p->flags & XFRM_STATE_ESN)) { if (rs->oseq_hi) { NL_SET_ERR_MSG( extack, "Replay oseq_hi should be 0 in non-ESN mode for output SA"); return -EINVAL; } if (rs->oseq == U32_MAX) { NL_SET_ERR_MSG( extack, "Replay oseq should be less than 0xFFFFFFFF in non-ESN mode for output SA"); return -EINVAL; } } else { if (rs->oseq == U32_MAX && rs->oseq_hi == U32_MAX) { NL_SET_ERR_MSG( extack, "Replay oseq and oseq_hi should be less than 0xFFFFFFFF for output SA"); return -EINVAL; } } if (rs->bmp_len) { NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA"); return -EINVAL; } } if (sa_dir == XFRM_SA_DIR_IN) { if (rs->oseq || rs->oseq_hi) { NL_SET_ERR_MSG(extack, "Replay oseq and oseq_hi should be 0 for input SA"); return -EINVAL; } if (!(p->flags & XFRM_STATE_ESN)) { if (rs->seq_hi) { NL_SET_ERR_MSG( extack, "Replay seq_hi should be 0 in non-ESN mode for input SA"); return -EINVAL; } if (rs->seq == U32_MAX) { NL_SET_ERR_MSG( extack, "Replay seq should be less than 0xFFFFFFFF in non-ESN mode for input SA"); return -EINVAL; } } else { if (rs->seq == U32_MAX && rs->seq_hi == U32_MAX) { NL_SET_ERR_MSG( extack, "Replay seq and seq_hi should be less than 0xFFFFFFFF for input SA"); return -EINVAL; } } } return 0; } static int verify_newsa_info(struct xfrm_usersa_info *p, struct nlattr **attrs, struct netlink_ext_ack *extack) { int err; u8 sa_dir = nla_get_u8_default(attrs[XFRMA_SA_DIR], 0); u16 family = p->sel.family; err = -EINVAL; switch (p->family) { case AF_INET: break; case AF_INET6: #if IS_ENABLED(CONFIG_IPV6) break; #else err = -EAFNOSUPPORT; NL_SET_ERR_MSG(extack, "IPv6 support disabled"); goto out; #endif default: NL_SET_ERR_MSG(extack, "Invalid address family"); goto out; } if (!family && !(p->flags & XFRM_STATE_AF_UNSPEC)) family = p->family; switch (family) { case AF_UNSPEC: break; case AF_INET: if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) { NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)"); goto out; } break; case AF_INET6: #if IS_ENABLED(CONFIG_IPV6) if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) { NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)"); goto out; } break; #else NL_SET_ERR_MSG(extack, "IPv6 support disabled"); err = -EAFNOSUPPORT; goto out; #endif default: NL_SET_ERR_MSG(extack, "Invalid address family in selector"); goto out; } err = -EINVAL; switch (p->id.proto) { case IPPROTO_AH: if (!attrs[XFRMA_ALG_AUTH] && !attrs[XFRMA_ALG_AUTH_TRUNC]) { NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH"); goto out; } if (attrs[XFRMA_ALG_AEAD] || attrs[XFRMA_ALG_CRYPT] || attrs[XFRMA_ALG_COMP] || attrs[XFRMA_TFCPAD]) { NL_SET_ERR_MSG(extack, "Invalid attributes for AH: AEAD, CRYPT, COMP, TFCPAD"); goto out; } break; case IPPROTO_ESP: if (attrs[XFRMA_ALG_COMP]) { NL_SET_ERR_MSG(extack, "Invalid 
attribute for ESP: COMP"); goto out; } if (!attrs[XFRMA_ALG_AUTH] && !attrs[XFRMA_ALG_AUTH_TRUNC] && !attrs[XFRMA_ALG_CRYPT] && !attrs[XFRMA_ALG_AEAD]) { NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD"); goto out; } if ((attrs[XFRMA_ALG_AUTH] || attrs[XFRMA_ALG_AUTH_TRUNC] || attrs[XFRMA_ALG_CRYPT]) && attrs[XFRMA_ALG_AEAD]) { NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT"); goto out; } if (attrs[XFRMA_TFCPAD] && p->mode != XFRM_MODE_TUNNEL) { NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode"); goto out; } if ((attrs[XFRMA_IPTFS_DROP_TIME] || attrs[XFRMA_IPTFS_REORDER_WINDOW] || attrs[XFRMA_IPTFS_DONT_FRAG] || attrs[XFRMA_IPTFS_INIT_DELAY] || attrs[XFRMA_IPTFS_MAX_QSIZE] || attrs[XFRMA_IPTFS_PKT_SIZE]) && p->mode != XFRM_MODE_IPTFS) { NL_SET_ERR_MSG(extack, "IP-TFS options can only be used in IP-TFS mode"); goto out; } break; case IPPROTO_COMP: if (!attrs[XFRMA_ALG_COMP]) { NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP"); goto out; } if (attrs[XFRMA_ALG_AEAD] || attrs[XFRMA_ALG_AUTH] || attrs[XFRMA_ALG_AUTH_TRUNC] || attrs[XFRMA_ALG_CRYPT] || attrs[XFRMA_TFCPAD]) { NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, AUTH, AUTH_TRUNC, CRYPT, TFCPAD"); goto out; } if (ntohl(p->id.spi) >= 0x10000) { NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)"); goto out; } break; #if IS_ENABLED(CONFIG_IPV6) case IPPROTO_DSTOPTS: case IPPROTO_ROUTING: if (attrs[XFRMA_ALG_COMP] || attrs[XFRMA_ALG_AUTH] || attrs[XFRMA_ALG_AUTH_TRUNC] || attrs[XFRMA_ALG_AEAD] || attrs[XFRMA_ALG_CRYPT] || attrs[XFRMA_ENCAP] || attrs[XFRMA_SEC_CTX] || attrs[XFRMA_TFCPAD]) { NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING"); goto out; } if (!attrs[XFRMA_COADDR]) { NL_SET_ERR_MSG(extack, "Missing required COADDR attribute for DSTOPTS/ROUTING"); goto out; } break; #endif default: NL_SET_ERR_MSG(extack, "Unsupported protocol"); goto out; } if ((err = verify_aead(attrs, extack))) goto out; if ((err = verify_auth_trunc(attrs, extack))) goto out; if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack))) goto out; if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack))) goto out; if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack))) goto out; if ((err = verify_sec_ctx_len(attrs, extack))) goto out; if ((err = verify_replay(p, attrs, sa_dir, extack))) goto out; err = -EINVAL; switch (p->mode) { case XFRM_MODE_TRANSPORT: case XFRM_MODE_TUNNEL: case XFRM_MODE_ROUTEOPTIMIZATION: case XFRM_MODE_BEET: break; case XFRM_MODE_IPTFS: if (p->id.proto != IPPROTO_ESP) { NL_SET_ERR_MSG(extack, "IP-TFS mode only supported with ESP"); goto out; } if (sa_dir == 0) { NL_SET_ERR_MSG(extack, "IP-TFS mode requires in or out direction attribute"); goto out; } break; default: NL_SET_ERR_MSG(extack, "Unsupported mode"); goto out; } err = 0; if (attrs[XFRMA_MTIMER_THRESH]) { if (!attrs[XFRMA_ENCAP]) { NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states"); err = -EINVAL; goto out; } if (sa_dir == XFRM_SA_DIR_OUT) { NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute should not be set on output SA"); err = -EINVAL; goto out; } } if (sa_dir == XFRM_SA_DIR_OUT) { if (p->flags & XFRM_STATE_DECAP_DSCP) { NL_SET_ERR_MSG(extack, "Flag DECAP_DSCP should not be set for output SA"); err = -EINVAL; goto out; } if (p->flags & XFRM_STATE_ICMP) { NL_SET_ERR_MSG(extack, "Flag ICMP should not be set for output SA"); err = -EINVAL; goto out; 
} if (p->flags & XFRM_STATE_WILDRECV) { NL_SET_ERR_MSG(extack, "Flag WILDRECV should not be set for output SA"); err = -EINVAL; goto out; } if (p->replay_window) { NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA"); err = -EINVAL; goto out; } if (attrs[XFRMA_IPTFS_DROP_TIME]) { NL_SET_ERR_MSG(extack, "IP-TFS drop time should not be set for output SA"); err = -EINVAL; goto out; } if (attrs[XFRMA_IPTFS_REORDER_WINDOW]) { NL_SET_ERR_MSG(extack, "IP-TFS reorder window should not be set for output SA"); err = -EINVAL; goto out; } if (attrs[XFRMA_REPLAY_VAL]) { struct xfrm_replay_state *replay; replay = nla_data(attrs[XFRMA_REPLAY_VAL]); if (replay->seq || replay->bitmap) { NL_SET_ERR_MSG(extack, "Replay seq and bitmap should be 0 for output SA"); err = -EINVAL; goto out; } } } if (sa_dir == XFRM_SA_DIR_IN) { if (p->flags & XFRM_STATE_NOPMTUDISC) { NL_SET_ERR_MSG(extack, "Flag NOPMTUDISC should not be set for input SA"); err = -EINVAL; goto out; } if (attrs[XFRMA_SA_EXTRA_FLAGS]) { u32 xflags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); if (xflags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP) { NL_SET_ERR_MSG(extack, "Flag DONT_ENCAP_DSCP should not be set for input SA"); err = -EINVAL; goto out; } if (xflags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP) { NL_SET_ERR_MSG(extack, "Flag OSEQ_MAY_WRAP should not be set for input SA"); err = -EINVAL; goto out; } } if (attrs[XFRMA_IPTFS_DONT_FRAG]) { NL_SET_ERR_MSG(extack, "IP-TFS don't fragment should not be set for input SA"); err = -EINVAL; goto out; } if (attrs[XFRMA_IPTFS_INIT_DELAY]) { NL_SET_ERR_MSG(extack, "IP-TFS initial delay should not be set for input SA"); err = -EINVAL; goto out; } if (attrs[XFRMA_IPTFS_MAX_QSIZE]) { NL_SET_ERR_MSG(extack, "IP-TFS max queue size should not be set for input SA"); err = -EINVAL; goto out; } if (attrs[XFRMA_IPTFS_PKT_SIZE]) { NL_SET_ERR_MSG(extack, "IP-TFS packet size should not be set for input SA"); err = -EINVAL; goto out; } } if (!sa_dir && attrs[XFRMA_SA_PCPU]) { NL_SET_ERR_MSG(extack, "SA_PCPU only supported with SA_DIR"); err = -EINVAL; goto out; } out: return err; } static int attach_one_algo(struct xfrm_algo **algpp, u8 *props, struct xfrm_algo_desc *(*get_byname)(const char *, int), struct nlattr *rta, struct netlink_ext_ack *extack) { struct xfrm_algo *p, *ualg; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = get_byname(ualg->alg_name, 1); if (!algo) { NL_SET_ERR_MSG(extack, "Requested COMP algorithm not found"); return -ENOSYS; } *props = algo->desc.sadb_alg_id; p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); if (!p) return -ENOMEM; strscpy(p->alg_name, algo->name); *algpp = p; return 0; } static int attach_crypt(struct xfrm_state *x, struct nlattr *rta, struct netlink_ext_ack *extack) { struct xfrm_algo *p, *ualg; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = xfrm_ealg_get_byname(ualg->alg_name, 1); if (!algo) { NL_SET_ERR_MSG(extack, "Requested CRYPT algorithm not found"); return -ENOSYS; } x->props.ealgo = algo->desc.sadb_alg_id; p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL); if (!p) return -ENOMEM; strscpy(p->alg_name, algo->name); x->ealg = p; x->geniv = algo->uinfo.encr.geniv; return 0; } static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props, struct nlattr *rta, struct netlink_ext_ack *extack) { struct xfrm_algo *ualg; struct xfrm_algo_auth *p; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = xfrm_aalg_get_byname(ualg->alg_name, 1); if (!algo) { NL_SET_ERR_MSG(extack, "Requested AUTH algorithm 
not found"); return -ENOSYS; } *props = algo->desc.sadb_alg_id; p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL); if (!p) return -ENOMEM; strscpy(p->alg_name, algo->name); p->alg_key_len = ualg->alg_key_len; p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8); *algpp = p; return 0; } static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props, struct nlattr *rta, struct netlink_ext_ack *extack) { struct xfrm_algo_auth *p, *ualg; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = xfrm_aalg_get_byname(ualg->alg_name, 1); if (!algo) { NL_SET_ERR_MSG(extack, "Requested AUTH_TRUNC algorithm not found"); return -ENOSYS; } if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) { NL_SET_ERR_MSG(extack, "Invalid length requested for truncated ICV"); return -EINVAL; } *props = algo->desc.sadb_alg_id; p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL); if (!p) return -ENOMEM; strscpy(p->alg_name, algo->name); if (!p->alg_trunc_len) p->alg_trunc_len = algo->uinfo.auth.icv_truncbits; *algpp = p; return 0; } static int attach_aead(struct xfrm_state *x, struct nlattr *rta, struct netlink_ext_ack *extack) { struct xfrm_algo_aead *p, *ualg; struct xfrm_algo_desc *algo; if (!rta) return 0; ualg = nla_data(rta); algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1); if (!algo) { NL_SET_ERR_MSG(extack, "Requested AEAD algorithm not found"); return -ENOSYS; } x->props.ealgo = algo->desc.sadb_alg_id; p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL); if (!p) return -ENOMEM; strscpy(p->alg_name, algo->name); x->aead = p; x->geniv = algo->uinfo.aead.geniv; return 0; } static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn, struct nlattr *rp, struct netlink_ext_ack *extack) { struct xfrm_replay_state_esn *up; unsigned int ulen; if (!replay_esn || !rp) return 0; up = nla_data(rp); ulen = xfrm_replay_state_esn_len(up); /* Check the overall length and the internal bitmap length to avoid * potential overflow. */ if (nla_len(rp) < (int)ulen) { NL_SET_ERR_MSG(extack, "ESN attribute is too short"); return -EINVAL; } if (xfrm_replay_state_esn_len(replay_esn) != ulen) { NL_SET_ERR_MSG(extack, "New ESN size doesn't match the existing SA's ESN size"); return -EINVAL; } if (replay_esn->bmp_len != up->bmp_len) { NL_SET_ERR_MSG(extack, "New ESN bitmap size doesn't match the existing SA's ESN bitmap"); return -EINVAL; } if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) { NL_SET_ERR_MSG(extack, "ESN replay window is longer than the bitmap"); return -EINVAL; } return 0; } static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn, struct xfrm_replay_state_esn **preplay_esn, struct nlattr *rta) { struct xfrm_replay_state_esn *p, *pp, *up; unsigned int klen, ulen; if (!rta) return 0; up = nla_data(rta); klen = xfrm_replay_state_esn_len(up); ulen = nla_len(rta) >= (int)klen ? 
klen : sizeof(*up); p = kzalloc(klen, GFP_KERNEL); if (!p) return -ENOMEM; pp = kzalloc(klen, GFP_KERNEL); if (!pp) { kfree(p); return -ENOMEM; } memcpy(p, up, ulen); memcpy(pp, up, ulen); *replay_esn = p; *preplay_esn = pp; return 0; } static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx) { unsigned int len = 0; if (xfrm_ctx) { len += sizeof(struct xfrm_user_sec_ctx); len += xfrm_ctx->ctx_len; } return len; } static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { memcpy(&x->id, &p->id, sizeof(x->id)); memcpy(&x->sel, &p->sel, sizeof(x->sel)); memcpy(&x->lft, &p->lft, sizeof(x->lft)); x->props.mode = p->mode; x->props.replay_window = min_t(unsigned int, p->replay_window, sizeof(x->replay.bitmap) * 8); x->props.reqid = p->reqid; x->props.family = p->family; memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr)); x->props.flags = p->flags; if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC)) x->sel.family = p->family; } /* * someday when pfkey also has support, we could have the code * somehow made shareable and move it to xfrm_state.c - JHS * */ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, int update_esn) { struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL; struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; if (re && x->replay_esn && x->preplay_esn) { struct xfrm_replay_state_esn *replay_esn; replay_esn = nla_data(re); memcpy(x->replay_esn, replay_esn, xfrm_replay_state_esn_len(replay_esn)); memcpy(x->preplay_esn, replay_esn, xfrm_replay_state_esn_len(replay_esn)); } if (rp) { struct xfrm_replay_state *replay; replay = nla_data(rp); memcpy(&x->replay, replay, sizeof(*replay)); memcpy(&x->preplay, replay, sizeof(*replay)); } if (lt) { struct xfrm_lifetime_cur *ltime; ltime = nla_data(lt); x->curlft.bytes = ltime->bytes; x->curlft.packets = ltime->packets; x->curlft.add_time = ltime->add_time; x->curlft.use_time = ltime->use_time; } if (et) x->replay_maxage = nla_get_u32(et); if (rt) x->replay_maxdiff = nla_get_u32(rt); if (mt) x->mapping_maxage = nla_get_u32(mt); } static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m) { if (attrs[XFRMA_SET_MARK]) { m->v = nla_get_u32(attrs[XFRMA_SET_MARK]); m->m = nla_get_u32_default(attrs[XFRMA_SET_MARK_MASK], 0xffffffff); } else { m->v = m->m = 0; } } static struct xfrm_state *xfrm_state_construct(struct net *net, struct xfrm_usersa_info *p, struct nlattr **attrs, int *errp, struct netlink_ext_ack *extack) { struct xfrm_state *x = xfrm_state_alloc(net); int err = -ENOMEM; if (!x) goto error_no_put; copy_from_user_state(x, p); if (attrs[XFRMA_ENCAP]) { x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), sizeof(*x->encap), GFP_KERNEL); if (x->encap == NULL) goto error; } if (attrs[XFRMA_COADDR]) { x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), sizeof(*x->coaddr), GFP_KERNEL); if (x->coaddr == NULL) goto error; } if (attrs[XFRMA_SA_EXTRA_FLAGS]) x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD], extack))) goto error; if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo, attrs[XFRMA_ALG_AUTH_TRUNC], extack))) goto error; if (!x->props.aalgo) { if ((err = attach_auth(&x->aalg, &x->props.aalgo, attrs[XFRMA_ALG_AUTH], extack))) goto error; } if ((err = attach_crypt(x, 
attrs[XFRMA_ALG_CRYPT], extack))) goto error; if ((err = attach_one_algo(&x->calg, &x->props.calgo, xfrm_calg_get_byname, attrs[XFRMA_ALG_COMP], extack))) goto error; if (attrs[XFRMA_TFCPAD]) x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); xfrm_mark_get(attrs, &x->mark); xfrm_smark_init(attrs, &x->props.smark); if (attrs[XFRMA_IF_ID]) x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); if (attrs[XFRMA_SA_DIR]) x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]); if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]) x->nat_keepalive_interval = nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]); if (attrs[XFRMA_SA_PCPU]) { x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]); if (x->pcpu_num >= num_possible_cpus()) goto error; } err = __xfrm_init_state(x, extack); if (err) goto error; if (attrs[XFRMA_SEC_CTX]) { err = security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])); if (err) goto error; } if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn, attrs[XFRMA_REPLAY_ESN_VAL]))) goto error; x->km.seq = p->seq; x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth; /* sysctl_xfrm_aevent_etime is in 100ms units */ x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M; if ((err = xfrm_init_replay(x, extack))) goto error; /* override default values from above */ xfrm_update_ae_params(x, attrs, 0); xfrm_set_type_offload(x, attrs[XFRMA_OFFLOAD_DEV]); /* configure the hardware if offload is requested */ if (attrs[XFRMA_OFFLOAD_DEV]) { err = xfrm_dev_state_add(net, x, nla_data(attrs[XFRMA_OFFLOAD_DEV]), extack); if (err) goto error; } if (x->mode_cbs && x->mode_cbs->user_init) { err = x->mode_cbs->user_init(net, x, attrs, extack); if (err) goto error; } return x; error: x->km.state = XFRM_STATE_DEAD; xfrm_state_put(x); error_no_put: *errp = err; return NULL; } static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_usersa_info *p = nlmsg_data(nlh); struct xfrm_state *x; int err; struct km_event c; err = verify_newsa_info(p, attrs, extack); if (err) return err; x = xfrm_state_construct(net, p, attrs, &err, extack); if (!x) return err; xfrm_state_hold(x); if (nlh->nlmsg_type == XFRM_MSG_NEWSA) err = xfrm_state_add(x); else err = xfrm_state_update(x); xfrm_audit_state_add(x, err ? 
0 : 1, true); if (err < 0) { x->km.state = XFRM_STATE_DEAD; xfrm_dev_state_delete(x); __xfrm_state_put(x); goto out; } if (x->km.state == XFRM_STATE_VOID) x->km.state = XFRM_STATE_VALID; c.seq = nlh->nlmsg_seq; c.portid = nlh->nlmsg_pid; c.event = nlh->nlmsg_type; km_state_notify(x, &c); out: xfrm_state_put(x); return err; } static struct xfrm_state *xfrm_user_state_lookup(struct net *net, struct xfrm_usersa_id *p, struct nlattr **attrs, int *errp) { struct xfrm_state *x = NULL; struct xfrm_mark m; int err; u32 mark = xfrm_mark_get(attrs, &m); if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) { err = -ESRCH; x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family); } else { xfrm_address_t *saddr = NULL; verify_one_addr(attrs, XFRMA_SRCADDR, &saddr); if (!saddr) { err = -EINVAL; goto out; } err = -ESRCH; x = xfrm_state_lookup_byaddr(net, mark, &p->daddr, saddr, p->proto, p->family); } out: if (!x && errp) *errp = err; return x; } static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_state *x; int err = -ESRCH; struct km_event c; struct xfrm_usersa_id *p = nlmsg_data(nlh); x = xfrm_user_state_lookup(net, p, attrs, &err); if (x == NULL) return err; if ((err = security_xfrm_state_delete(x)) != 0) goto out; if (xfrm_state_kern(x)) { NL_SET_ERR_MSG(extack, "SA is in use by tunnels"); err = -EPERM; goto out; } err = xfrm_state_delete(x); if (err < 0) goto out; c.seq = nlh->nlmsg_seq; c.portid = nlh->nlmsg_pid; c.event = nlh->nlmsg_type; km_state_notify(x, &c); out: xfrm_audit_state_delete(x, err ? 0 : 1, true); xfrm_state_put(x); return err; } static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { memset(p, 0, sizeof(*p)); memcpy(&p->id, &x->id, sizeof(p->id)); memcpy(&p->sel, &x->sel, sizeof(p->sel)); memcpy(&p->lft, &x->lft, sizeof(p->lft)); if (x->xso.dev) xfrm_dev_state_update_stats(x); memcpy(&p->curlft, &x->curlft, sizeof(p->curlft)); put_unaligned(x->stats.replay_window, &p->stats.replay_window); put_unaligned(x->stats.replay, &p->stats.replay); put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed); memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr)); p->mode = x->props.mode; p->replay_window = x->props.replay_window; p->reqid = x->props.reqid; p->family = x->props.family; p->flags = x->props.flags; p->seq = x->km.seq; } struct xfrm_dump_info { struct sk_buff *in_skb; struct sk_buff *out_skb; u32 nlmsg_seq; u16 nlmsg_flags; }; static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb) { struct xfrm_user_sec_ctx *uctx; struct nlattr *attr; int ctx_size = sizeof(*uctx) + s->ctx_len; attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size); if (attr == NULL) return -EMSGSIZE; uctx = nla_data(attr); uctx->exttype = XFRMA_SEC_CTX; uctx->len = ctx_size; uctx->ctx_doi = s->ctx_doi; uctx->ctx_alg = s->ctx_alg; uctx->ctx_len = s->ctx_len; memcpy(uctx + 1, s->ctx_str, s->ctx_len); return 0; } static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb) { struct xfrm_user_offload *xuo; struct nlattr *attr; attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo)); if (attr == NULL) return -EMSGSIZE; xuo = nla_data(attr); memset(xuo, 0, sizeof(*xuo)); xuo->ifindex = xso->dev->ifindex; if (xso->dir == XFRM_DEV_OFFLOAD_IN) xuo->flags = XFRM_OFFLOAD_INBOUND; if (xso->type == XFRM_DEV_OFFLOAD_PACKET) xuo->flags |= XFRM_OFFLOAD_PACKET; return 0; } static bool xfrm_redact(void) { return 
IS_ENABLED(CONFIG_SECURITY) && security_locked_down(LOCKDOWN_XFRM_SECRET); } static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) { struct xfrm_algo *algo; struct xfrm_algo_auth *ap; struct nlattr *nla; bool redact_secret = xfrm_redact(); nla = nla_reserve(skb, XFRMA_ALG_AUTH, sizeof(*algo) + (auth->alg_key_len + 7) / 8); if (!nla) return -EMSGSIZE; algo = nla_data(nla); strscpy_pad(algo->alg_name, auth->alg_name); if (redact_secret && auth->alg_key_len) memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8); else memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); algo->alg_key_len = auth->alg_key_len; nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth)); if (!nla) return -EMSGSIZE; ap = nla_data(nla); strscpy_pad(ap->alg_name, auth->alg_name); ap->alg_key_len = auth->alg_key_len; ap->alg_trunc_len = auth->alg_trunc_len; if (redact_secret && auth->alg_key_len) memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8); else memcpy(ap->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); return 0; } static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb) { struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead)); struct xfrm_algo_aead *ap; bool redact_secret = xfrm_redact(); if (!nla) return -EMSGSIZE; ap = nla_data(nla); strscpy_pad(ap->alg_name, aead->alg_name); ap->alg_key_len = aead->alg_key_len; ap->alg_icv_len = aead->alg_icv_len; if (redact_secret && aead->alg_key_len) memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8); else memcpy(ap->alg_key, aead->alg_key, (aead->alg_key_len + 7) / 8); return 0; } static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb) { struct xfrm_algo *ap; bool redact_secret = xfrm_redact(); struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(ealg)); if (!nla) return -EMSGSIZE; ap = nla_data(nla); strscpy_pad(ap->alg_name, ealg->alg_name); ap->alg_key_len = ealg->alg_key_len; if (redact_secret && ealg->alg_key_len) memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8); else memcpy(ap->alg_key, ealg->alg_key, (ealg->alg_key_len + 7) / 8); return 0; } static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb) { struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg)); struct xfrm_algo *ap; if (!nla) return -EMSGSIZE; ap = nla_data(nla); strscpy_pad(ap->alg_name, calg->alg_name); ap->alg_key_len = 0; return 0; } static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb) { struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep)); struct xfrm_encap_tmpl *uep; if (!nla) return -EMSGSIZE; uep = nla_data(nla); memset(uep, 0, sizeof(*uep)); uep->encap_type = ep->encap_type; uep->encap_sport = ep->encap_sport; uep->encap_dport = ep->encap_dport; uep->encap_oa = ep->encap_oa; return 0; } static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m) { int ret = 0; if (m->v | m->m) { ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v); if (!ret) ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m); } return ret; } /* Don't change this without updating xfrm_sa_len! 
*/ static int copy_to_user_state_extra(struct xfrm_state *x, struct xfrm_usersa_info *p, struct sk_buff *skb) { int ret = 0; copy_to_user_state(x, p); if (x->props.extra_flags) { ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS, x->props.extra_flags); if (ret) goto out; } if (x->coaddr) { ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); if (ret) goto out; } if (x->lastused) { ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused, XFRMA_PAD); if (ret) goto out; } if (x->aead) { ret = copy_to_user_aead(x->aead, skb); if (ret) goto out; } if (x->aalg) { ret = copy_to_user_auth(x->aalg, skb); if (ret) goto out; } if (x->ealg) { ret = copy_to_user_ealg(x->ealg, skb); if (ret) goto out; } if (x->calg) { ret = copy_to_user_calg(x->calg, skb); if (ret) goto out; } if (x->encap) { ret = copy_to_user_encap(x->encap, skb); if (ret) goto out; } if (x->tfcpad) { ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad); if (ret) goto out; } ret = xfrm_mark_put(skb, &x->mark); if (ret) goto out; ret = xfrm_smark_put(skb, &x->props.smark); if (ret) goto out; if (x->replay_esn) ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL, xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); else ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); if (ret) goto out; if(x->xso.dev) ret = copy_user_offload(&x->xso, skb); if (ret) goto out; if (x->if_id) { ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id); if (ret) goto out; } if (x->security) { ret = copy_sec_ctx(x->security, skb); if (ret) goto out; } if (x->mode_cbs && x->mode_cbs->copy_to_user) ret = x->mode_cbs->copy_to_user(x, skb); if (ret) goto out; if (x->mapping_maxage) { ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage); if (ret) goto out; } if (x->pcpu_num != UINT_MAX) { ret = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); if (ret) goto out; } if (x->dir) ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir); if (x->nat_keepalive_interval) { ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL, x->nat_keepalive_interval); if (ret) goto out; } out: return ret; } static int dump_one_state(struct xfrm_state *x, int count, void *ptr) { struct xfrm_dump_info *sp = ptr; struct sk_buff *in_skb = sp->in_skb; struct sk_buff *skb = sp->out_skb; struct xfrm_translator *xtr; struct xfrm_usersa_info *p; struct nlmsghdr *nlh; int err; nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags); if (nlh == NULL) return -EMSGSIZE; p = nlmsg_data(nlh); err = copy_to_user_state_extra(x, p, skb); if (err) { nlmsg_cancel(skb, nlh); return err; } nlmsg_end(skb, nlh); xtr = xfrm_get_translator(); if (xtr) { err = xtr->alloc_compat(skb, nlh); xfrm_put_translator(xtr); if (err) { nlmsg_cancel(skb, nlh); return err; } } return 0; } static int xfrm_dump_sa_done(struct netlink_callback *cb) { struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; struct sock *sk = cb->skb->sk; struct net *net = sock_net(sk); if (cb->args[0]) xfrm_state_walk_done(walk, net); return 0; } static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1]; struct xfrm_dump_info info; BUILD_BUG_ON(sizeof(struct xfrm_state_walk) > sizeof(cb->args) - sizeof(cb->args[0])); info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; if (!cb->args[0]) { struct nlattr *attrs[XFRMA_MAX+1]; struct xfrm_address_filter *filter = NULL; u8 proto = 0; int err; err = 
nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX, xfrma_policy, cb->extack); if (err < 0) return err; if (attrs[XFRMA_ADDRESS_FILTER]) { filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]), sizeof(*filter), GFP_KERNEL); if (filter == NULL) return -ENOMEM; /* see addr_match(), (prefix length >> 5) << 2 * will be used to compare xfrm_address_t */ if (filter->splen > (sizeof(xfrm_address_t) << 3) || filter->dplen > (sizeof(xfrm_address_t) << 3)) { kfree(filter); return -EINVAL; } } if (attrs[XFRMA_PROTO]) proto = nla_get_u8(attrs[XFRMA_PROTO]); xfrm_state_walk_init(walk, proto, filter); cb->args[0] = 1; } (void) xfrm_state_walk(net, walk, dump_one_state, &info); return skb->len; } static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, struct xfrm_state *x, u32 seq) { struct xfrm_dump_info info; struct sk_buff *skb; int err; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return ERR_PTR(-ENOMEM); info.in_skb = in_skb; info.out_skb = skb; info.nlmsg_seq = seq; info.nlmsg_flags = 0; err = dump_one_state(x, 0, &info); if (err) { kfree_skb(skb); return ERR_PTR(err); } return skb; } /* A wrapper for nlmsg_multicast() checking that nlsk is still available. * Must be called with RCU read lock. */ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb, u32 pid, unsigned int group) { struct sock *nlsk = rcu_dereference(net->xfrm.nlsk); struct xfrm_translator *xtr; if (!nlsk) { kfree_skb(skb); return -EPIPE; } xtr = xfrm_get_translator(); if (xtr) { int err = xtr->alloc_compat(skb, nlmsg_hdr(skb)); xfrm_put_translator(xtr); if (err) { kfree_skb(skb); return err; } } return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC); } static inline unsigned int xfrm_spdinfo_msgsize(void) { return NLMSG_ALIGN(4) + nla_total_size(sizeof(struct xfrmu_spdinfo)) + nla_total_size(sizeof(struct xfrmu_spdhinfo)) + nla_total_size(sizeof(struct xfrmu_spdhthresh)) + nla_total_size(sizeof(struct xfrmu_spdhthresh)); } static int build_spdinfo(struct sk_buff *skb, struct net *net, u32 portid, u32 seq, u32 flags) { struct xfrmk_spdinfo si; struct xfrmu_spdinfo spc; struct xfrmu_spdhinfo sph; struct xfrmu_spdhthresh spt4, spt6; struct nlmsghdr *nlh; int err; u32 *f; unsigned lseq; nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... 
*/ return -EMSGSIZE; f = nlmsg_data(nlh); *f = flags; xfrm_spd_getinfo(net, &si); spc.incnt = si.incnt; spc.outcnt = si.outcnt; spc.fwdcnt = si.fwdcnt; spc.inscnt = si.inscnt; spc.outscnt = si.outscnt; spc.fwdscnt = si.fwdscnt; sph.spdhcnt = si.spdhcnt; sph.spdhmcnt = si.spdhmcnt; do { lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock); spt4.lbits = net->xfrm.policy_hthresh.lbits4; spt4.rbits = net->xfrm.policy_hthresh.rbits4; spt6.lbits = net->xfrm.policy_hthresh.lbits6; spt6.rbits = net->xfrm.policy_hthresh.rbits6; } while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq)); err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); if (!err) err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); if (!err) err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4); if (!err) err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6); if (err) { nlmsg_cancel(skb, nlh); return err; } nlmsg_end(skb, nlh); return 0; } static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrmu_spdhthresh *thresh4 = NULL; struct xfrmu_spdhthresh *thresh6 = NULL; /* selector prefixlen thresholds to hash policies */ if (attrs[XFRMA_SPD_IPV4_HTHRESH]) { struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH]; if (nla_len(rta) < sizeof(*thresh4)) { NL_SET_ERR_MSG(extack, "Invalid SPD_IPV4_HTHRESH attribute length"); return -EINVAL; } thresh4 = nla_data(rta); if (thresh4->lbits > 32 || thresh4->rbits > 32) { NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 32 for IPv4)"); return -EINVAL; } } if (attrs[XFRMA_SPD_IPV6_HTHRESH]) { struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH]; if (nla_len(rta) < sizeof(*thresh6)) { NL_SET_ERR_MSG(extack, "Invalid SPD_IPV6_HTHRESH attribute length"); return -EINVAL; } thresh6 = nla_data(rta); if (thresh6->lbits > 128 || thresh6->rbits > 128) { NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 128 for IPv6)"); return -EINVAL; } } if (thresh4 || thresh6) { write_seqlock(&net->xfrm.policy_hthresh.lock); if (thresh4) { net->xfrm.policy_hthresh.lbits4 = thresh4->lbits; net->xfrm.policy_hthresh.rbits4 = thresh4->rbits; } if (thresh6) { net->xfrm.policy_hthresh.lbits6 = thresh6->lbits; net->xfrm.policy_hthresh.rbits6 = thresh6->rbits; } write_sequnlock(&net->xfrm.policy_hthresh.lock); xfrm_policy_hash_rebuild(net); } return 0; } static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct sk_buff *r_skb; u32 *flags = nlmsg_data(nlh); u32 sportid = NETLINK_CB(skb).portid; u32 seq = nlh->nlmsg_seq; int err; r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM; err = build_spdinfo(r_skb, net, sportid, seq, *flags); BUG_ON(err < 0); return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid); } static inline unsigned int xfrm_sadinfo_msgsize(void) { return NLMSG_ALIGN(4) + nla_total_size(sizeof(struct xfrmu_sadhinfo)) + nla_total_size(4); /* XFRMA_SAD_CNT */ } static int build_sadinfo(struct sk_buff *skb, struct net *net, u32 portid, u32 seq, u32 flags) { struct xfrmk_sadinfo si; struct xfrmu_sadhinfo sh; struct nlmsghdr *nlh; int err; u32 *f; nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0); if (nlh == NULL) /* shouldn't really happen ... 
*/ return -EMSGSIZE; f = nlmsg_data(nlh); *f = flags; xfrm_sad_getinfo(net, &si); sh.sadhmcnt = si.sadhmcnt; sh.sadhcnt = si.sadhcnt; err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt); if (!err) err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); if (err) { nlmsg_cancel(skb, nlh); return err; } nlmsg_end(skb, nlh); return 0; } static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct sk_buff *r_skb; u32 *flags = nlmsg_data(nlh); u32 sportid = NETLINK_CB(skb).portid; u32 seq = nlh->nlmsg_seq; int err; r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC); if (r_skb == NULL) return -ENOMEM; err = build_sadinfo(r_skb, net, sportid, seq, *flags); BUG_ON(err < 0); return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid); } static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_usersa_id *p = nlmsg_data(nlh); struct xfrm_state *x; struct sk_buff *resp_skb; int err = -ESRCH; x = xfrm_user_state_lookup(net, p, attrs, &err); if (x == NULL) goto out_noput; resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); } else { err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid); } xfrm_state_put(x); out_noput: return err; } static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_state *x; struct xfrm_userspi_info *p; struct xfrm_translator *xtr; struct sk_buff *resp_skb; xfrm_address_t *daddr; int family; int err; u32 mark; struct xfrm_mark m; u32 if_id = 0; u32 pcpu_num = UINT_MAX; p = nlmsg_data(nlh); err = verify_spi_info(p->info.id.proto, p->min, p->max, extack); if (err) goto out_noput; family = p->info.family; daddr = &p->info.id.daddr; x = NULL; mark = xfrm_mark_get(attrs, &m); if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); if (attrs[XFRMA_SA_PCPU]) { pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]); if (pcpu_num >= num_possible_cpus()) { err = -EINVAL; goto out_noput; } } if (p->info.seq) { x = xfrm_find_acq_byseq(net, mark, p->info.seq, pcpu_num); if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) { xfrm_state_put(x); x = NULL; } } if (!x) x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid, if_id, pcpu_num, p->info.id.proto, daddr, &p->info.saddr, 1, family); err = -ENOENT; if (!x) { NL_SET_ERR_MSG(extack, "Target ACQUIRE not found"); goto out_noput; } err = xfrm_alloc_spi(x, p->min, p->max, extack); if (err) goto out; if (attrs[XFRMA_SA_DIR]) x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]); resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq); if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); goto out; } xtr = xfrm_get_translator(); if (xtr) { err = xtr->alloc_compat(skb, nlmsg_hdr(skb)); xfrm_put_translator(xtr); if (err) { kfree_skb(resp_skb); goto out; } } err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid); out: xfrm_state_put(x); out_noput: return err; } static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack) { switch (dir) { case XFRM_POLICY_IN: case XFRM_POLICY_OUT: case XFRM_POLICY_FWD: break; default: NL_SET_ERR_MSG(extack, "Invalid policy direction"); return -EINVAL; } return 0; } static int verify_policy_type(u8 type, struct netlink_ext_ack *extack) { switch (type) { case XFRM_POLICY_TYPE_MAIN: #ifdef CONFIG_XFRM_SUB_POLICY case 
XFRM_POLICY_TYPE_SUB: #endif break; default: NL_SET_ERR_MSG(extack, "Invalid policy type"); return -EINVAL; } return 0; } static int verify_newpolicy_info(struct xfrm_userpolicy_info *p, struct netlink_ext_ack *extack) { int ret; switch (p->share) { case XFRM_SHARE_ANY: case XFRM_SHARE_SESSION: case XFRM_SHARE_USER: case XFRM_SHARE_UNIQUE: break; default: NL_SET_ERR_MSG(extack, "Invalid policy share"); return -EINVAL; } switch (p->action) { case XFRM_POLICY_ALLOW: case XFRM_POLICY_BLOCK: break; default: NL_SET_ERR_MSG(extack, "Invalid policy action"); return -EINVAL; } switch (p->sel.family) { case AF_INET: if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) { NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)"); return -EINVAL; } break; case AF_INET6: #if IS_ENABLED(CONFIG_IPV6) if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) { NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)"); return -EINVAL; } break; #else NL_SET_ERR_MSG(extack, "IPv6 support disabled"); return -EAFNOSUPPORT; #endif default: NL_SET_ERR_MSG(extack, "Invalid selector family"); return -EINVAL; } ret = verify_policy_dir(p->dir, extack); if (ret) return ret; if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) { NL_SET_ERR_MSG(extack, "Policy index doesn't match direction"); return -EINVAL; } return 0; } static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_user_sec_ctx *uctx; if (!rt) return 0; uctx = nla_data(rt); return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL); } static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut, int nr) { int i; xp->xfrm_nr = nr; for (i = 0; i < nr; i++, ut++) { struct xfrm_tmpl *t = &xp->xfrm_vec[i]; memcpy(&t->id, &ut->id, sizeof(struct xfrm_id)); memcpy(&t->saddr, &ut->saddr, sizeof(xfrm_address_t)); t->reqid = ut->reqid; t->mode = ut->mode; t->share = ut->share; t->optional = ut->optional; t->aalgos = ut->aalgos; t->ealgos = ut->ealgos; t->calgos = ut->calgos; /* If all masks are ~0, then we allow all algorithms. */ t->allalgs = !~(t->aalgos & t->ealgos & t->calgos); t->encap_family = ut->family; } } static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family, int dir, struct netlink_ext_ack *extack) { u16 prev_family; int i; if (nr > XFRM_MAX_DEPTH) { NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")"); return -EINVAL; } prev_family = family; for (i = 0; i < nr; i++) { /* We never validated the ut->family value, so many * applications simply leave it at zero. The check was * never made and ut->family was ignored because all * templates could be assumed to have the same family as * the policy itself. Now that we will have ipv4-in-ipv6 * and ipv6-in-ipv4 tunnels, this is no longer true. 
*/ if (!ut[i].family) ut[i].family = family; switch (ut[i].mode) { case XFRM_MODE_TUNNEL: case XFRM_MODE_BEET: if (ut[i].optional && dir == XFRM_POLICY_OUT) { NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy"); return -EINVAL; } break; case XFRM_MODE_IPTFS: break; default: if (ut[i].family != prev_family) { NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change"); return -EINVAL; } break; } if (ut[i].mode >= XFRM_MODE_MAX) { NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")"); return -EINVAL; } prev_family = ut[i].family; switch (ut[i].family) { case AF_INET: break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: break; #endif default: NL_SET_ERR_MSG(extack, "Invalid family in template"); return -EINVAL; } if (!xfrm_id_proto_valid(ut[i].id.proto)) { NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template"); return -EINVAL; } } return 0; } static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs, int dir, struct netlink_ext_ack *extack) { struct nlattr *rt = attrs[XFRMA_TMPL]; if (!rt) { pol->xfrm_nr = 0; } else { struct xfrm_user_tmpl *utmpl = nla_data(rt); int nr = nla_len(rt) / sizeof(*utmpl); int err; err = validate_tmpl(nr, utmpl, pol->family, dir, extack); if (err) return err; copy_templates(pol, utmpl, nr); } return 0; } static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct nlattr *rt = attrs[XFRMA_POLICY_TYPE]; struct xfrm_userpolicy_type *upt; u8 type = XFRM_POLICY_TYPE_MAIN; int err; if (rt) { upt = nla_data(rt); type = upt->type; } err = verify_policy_type(type, extack); if (err) return err; *tp = type; return 0; } static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p) { xp->priority = p->priority; xp->index = p->index; memcpy(&xp->selector, &p->sel, sizeof(xp->selector)); memcpy(&xp->lft, &p->lft, sizeof(xp->lft)); xp->action = p->action; xp->flags = p->flags; xp->family = p->sel.family; /* XXX xp->share = p->share; */ } static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) { memset(p, 0, sizeof(*p)); memcpy(&p->sel, &xp->selector, sizeof(p->sel)); memcpy(&p->lft, &xp->lft, sizeof(p->lft)); memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); p->priority = xp->priority; p->index = xp->index; p->sel.family = xp->family; p->dir = dir; p->action = xp->action; p->flags = xp->flags; p->share = XFRM_SHARE_ANY; /* XXX xp->share */ } static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp, struct netlink_ext_ack *extack) { struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL); int err; if (!xp) { *errp = -ENOMEM; return NULL; } copy_from_user_policy(xp, p); err = copy_from_user_policy_type(&xp->type, attrs, extack); if (err) goto error; if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack))) err = copy_from_user_sec_ctx(xp, attrs); if (err) goto error; xfrm_mark_get(attrs, &xp->mark); if (attrs[XFRMA_IF_ID]) xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]); /* configure the hardware if offload is requested */ if (attrs[XFRMA_OFFLOAD_DEV]) { err = xfrm_dev_policy_add(net, xp, nla_data(attrs[XFRMA_OFFLOAD_DEV]), p->dir, extack); if (err) goto error; } return xp; error: *errp = err; xp->walk.dead = 1; xfrm_policy_destroy(xp); return NULL; } static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { 
struct net *net = sock_net(skb->sk); struct xfrm_userpolicy_info *p = nlmsg_data(nlh); struct xfrm_policy *xp; struct km_event c; int err; int excl; err = verify_newpolicy_info(p, extack); if (err) return err; err = verify_sec_ctx_len(attrs, extack); if (err) return err; xp = xfrm_policy_construct(net, p, attrs, &err, extack); if (!xp) return err; /* shouldn't excl be based on nlh flags?? * Aha! this is anti-netlink really i.e more pfkey derived * in netlink excl is a flag and you wouldn't need * a type XFRM_MSG_UPDPOLICY - JHS */ excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY; err = xfrm_policy_insert(p->dir, xp, excl); xfrm_audit_policy_add(xp, err ? 0 : 1, true); if (err) { xfrm_dev_policy_delete(xp); xfrm_dev_policy_free(xp); security_xfrm_policy_free(xp->security); kfree(xp); return err; } c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; c.portid = nlh->nlmsg_pid; km_policy_notify(xp, p->dir, &c); xfrm_pol_put(xp); return 0; } static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) { struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH]; int i; if (xp->xfrm_nr == 0) return 0; if (xp->xfrm_nr > XFRM_MAX_DEPTH) return -ENOBUFS; for (i = 0; i < xp->xfrm_nr; i++) { struct xfrm_user_tmpl *up = &vec[i]; struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; memset(up, 0, sizeof(*up)); memcpy(&up->id, &kp->id, sizeof(up->id)); up->family = kp->encap_family; memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); up->reqid = kp->reqid; up->mode = kp->mode; up->share = kp->share; up->optional = kp->optional; up->aalgos = kp->aalgos; up->ealgos = kp->ealgos; up->calgos = kp->calgos; } return nla_put(skb, XFRMA_TMPL, sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec); } static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb) { if (x->security) { return copy_sec_ctx(x->security, skb); } return 0; } static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb) { if (xp->security) return copy_sec_ctx(xp->security, skb); return 0; } static inline unsigned int userpolicy_type_attrsize(void) { #ifdef CONFIG_XFRM_SUB_POLICY return nla_total_size(sizeof(struct xfrm_userpolicy_type)); #else return 0; #endif } #ifdef CONFIG_XFRM_SUB_POLICY static int copy_to_user_policy_type(u8 type, struct sk_buff *skb) { struct xfrm_userpolicy_type upt; /* Sadly there are two holes in struct xfrm_userpolicy_type */ memset(&upt, 0, sizeof(upt)); upt.type = type; return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt); } #else static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb) { return 0; } #endif static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr) { struct xfrm_dump_info *sp = ptr; struct xfrm_userpolicy_info *p; struct sk_buff *in_skb = sp->in_skb; struct sk_buff *skb = sp->out_skb; struct xfrm_translator *xtr; struct nlmsghdr *nlh; int err; nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq, XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags); if (nlh == NULL) return -EMSGSIZE; p = nlmsg_data(nlh); copy_to_user_policy(xp, p, dir); err = copy_to_user_tmpl(xp, skb); if (!err) err = copy_to_user_sec_ctx(xp, skb); if (!err) err = copy_to_user_policy_type(xp->type, skb); if (!err) err = xfrm_mark_put(skb, &xp->mark); if (!err) err = xfrm_if_id_put(skb, xp->if_id); if (!err && xp->xdo.dev) err = copy_user_offload(&xp->xdo, skb); if (err) { nlmsg_cancel(skb, nlh); return err; } nlmsg_end(skb, nlh); xtr = xfrm_get_translator(); if (xtr) { err = xtr->alloc_compat(skb, nlh); xfrm_put_translator(xtr); if (err) { nlmsg_cancel(skb, 
nlh); return err; } } return 0; } static int xfrm_dump_policy_done(struct netlink_callback *cb) { struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; struct net *net = sock_net(cb->skb->sk); xfrm_policy_walk_done(walk, net); return 0; } static int xfrm_dump_policy_start(struct netlink_callback *cb) { struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args)); xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); return 0; } static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; struct xfrm_dump_info info; info.in_skb = cb->skb; info.out_skb = skb; info.nlmsg_seq = cb->nlh->nlmsg_seq; info.nlmsg_flags = NLM_F_MULTI; (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); return skb->len; } static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, struct xfrm_policy *xp, int dir, u32 seq) { struct xfrm_dump_info info; struct sk_buff *skb; int err; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return ERR_PTR(-ENOMEM); info.in_skb = in_skb; info.out_skb = skb; info.nlmsg_seq = seq; info.nlmsg_flags = 0; err = dump_one_policy(xp, dir, 0, &info); if (err) { kfree_skb(skb); return ERR_PTR(err); } return skb; } static int xfrm_notify_userpolicy(struct net *net) { struct xfrm_userpolicy_default *up; int len = NLMSG_ALIGN(sizeof(*up)); struct nlmsghdr *nlh; struct sk_buff *skb; int err; skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0); if (nlh == NULL) { kfree_skb(skb); return -EMSGSIZE; } up = nlmsg_data(nlh); up->in = net->xfrm.policy_default[XFRM_POLICY_IN]; up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD]; up->out = net->xfrm.policy_default[XFRM_POLICY_OUT]; nlmsg_end(skb, nlh); rcu_read_lock(); err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); rcu_read_unlock(); return err; } static bool xfrm_userpolicy_is_valid(__u8 policy) { return policy == XFRM_USERPOLICY_BLOCK || policy == XFRM_USERPOLICY_ACCEPT; } static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_userpolicy_default *up = nlmsg_data(nlh); if (xfrm_userpolicy_is_valid(up->in)) net->xfrm.policy_default[XFRM_POLICY_IN] = up->in; if (xfrm_userpolicy_is_valid(up->fwd)) net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd; if (xfrm_userpolicy_is_valid(up->out)) net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out; rt_genid_bump_all(net); xfrm_notify_userpolicy(net); return 0; } static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct sk_buff *r_skb; struct nlmsghdr *r_nlh; struct net *net = sock_net(skb->sk); struct xfrm_userpolicy_default *r_up; int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default)); u32 portid = NETLINK_CB(skb).portid; u32 seq = nlh->nlmsg_seq; r_skb = nlmsg_new(len, GFP_ATOMIC); if (!r_skb) return -ENOMEM; r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0); if (!r_nlh) { kfree_skb(r_skb); return -EMSGSIZE; } r_up = nlmsg_data(r_nlh); r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN]; r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD]; r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT]; nlmsg_end(r_skb, r_nlh); return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid); } static 
int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_policy *xp; struct xfrm_userpolicy_id *p; u8 type = XFRM_POLICY_TYPE_MAIN; int err; struct km_event c; int delete; struct xfrm_mark m; u32 if_id = 0; p = nlmsg_data(nlh); delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY; err = copy_from_user_policy_type(&type, attrs, extack); if (err) return err; err = verify_policy_dir(p->dir, extack); if (err) return err; if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); xfrm_mark_get(attrs, &m); if (p->index) xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, delete, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; err = verify_sec_ctx_len(attrs, extack); if (err) return err; ctx = NULL; if (rt) { struct xfrm_user_sec_ctx *uctx = nla_data(rt); err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); if (err) return err; } xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, &p->sel, ctx, delete, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) return -ENOENT; if (!delete) { struct sk_buff *resp_skb; resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq); if (IS_ERR(resp_skb)) { err = PTR_ERR(resp_skb); } else { err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid); } } else { xfrm_audit_policy_delete(xp, err ? 0 : 1, true); if (err != 0) goto out; c.data.byid = p->index; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; c.portid = nlh->nlmsg_pid; km_policy_notify(xp, p->dir, &c); } out: xfrm_pol_put(xp); return err; } static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct km_event c; struct xfrm_usersa_flush *p = nlmsg_data(nlh); int err; err = xfrm_state_flush(net, p->proto, true); if (err) { if (err == -ESRCH) /* empty table */ return 0; return err; } c.data.proto = p->proto; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; c.portid = nlh->nlmsg_pid; c.net = net; km_state_notify(NULL, &c); return 0; } static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x) { unsigned int replay_size = x->replay_esn ? 
xfrm_replay_state_esn_len(x->replay_esn) : sizeof(struct xfrm_replay_state); return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id)) + nla_total_size(replay_size) + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur)) + nla_total_size(sizeof(struct xfrm_mark)) + nla_total_size(4) /* XFRM_AE_RTHR */ + nla_total_size(4) /* XFRM_AE_ETHR */ + nla_total_size(sizeof(x->dir)) /* XFRMA_SA_DIR */ + nla_total_size(4); /* XFRMA_SA_PCPU */ } static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) { struct xfrm_aevent_id *id; struct nlmsghdr *nlh; int err; nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0); if (nlh == NULL) return -EMSGSIZE; id = nlmsg_data(nlh); memset(&id->sa_id, 0, sizeof(id->sa_id)); memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr)); id->sa_id.spi = x->id.spi; id->sa_id.family = x->props.family; id->sa_id.proto = x->id.proto; memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr)); id->reqid = x->props.reqid; id->flags = c->data.aevent; if (x->replay_esn) { err = nla_put(skb, XFRMA_REPLAY_ESN_VAL, xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); } else { err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); } if (err) goto out_cancel; err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft, XFRMA_PAD); if (err) goto out_cancel; if (id->flags & XFRM_AE_RTHR) { err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); if (err) goto out_cancel; } if (id->flags & XFRM_AE_ETHR) { err = nla_put_u32(skb, XFRMA_ETIMER_THRESH, x->replay_maxage * 10 / HZ); if (err) goto out_cancel; } err = xfrm_mark_put(skb, &x->mark); if (err) goto out_cancel; err = xfrm_if_id_put(skb, x->if_id); if (err) goto out_cancel; if (x->pcpu_num != UINT_MAX) { err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); if (err) goto out_cancel; } if (x->dir) { err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir); if (err) goto out_cancel; } nlmsg_end(skb, nlh); return 0; out_cancel: nlmsg_cancel(skb, nlh); return err; } static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_state *x; struct sk_buff *r_skb; int err; struct km_event c; u32 mark; struct xfrm_mark m; struct xfrm_aevent_id *p = nlmsg_data(nlh); struct xfrm_usersa_id *id = &p->sa_id; mark = xfrm_mark_get(attrs, &m); x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family); if (x == NULL) return -ESRCH; r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC); if (r_skb == NULL) { xfrm_state_put(x); return -ENOMEM; } /* * XXX: is this lock really needed - none of the other * gets lock (the concern is things getting updated * while we are still reading) - jhs */ spin_lock_bh(&x->lock); c.data.aevent = p->flags; c.seq = nlh->nlmsg_seq; c.portid = nlh->nlmsg_pid; err = build_aevent(r_skb, x, &c); BUG_ON(err < 0); err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid); spin_unlock_bh(&x->lock); xfrm_state_put(x); return err; } static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_state *x; struct km_event c; int err = -EINVAL; u32 mark = 0; struct xfrm_mark m; struct xfrm_aevent_id *p = nlmsg_data(nlh); struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; struct nlattr *rt = 
attrs[XFRMA_REPLAY_THRESH]; if (!lt && !rp && !re && !et && !rt) { NL_SET_ERR_MSG(extack, "Missing required attribute for AE"); return err; } /* pedantic mode - thou shalt sayeth replaceth */ if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) { NL_SET_ERR_MSG(extack, "NLM_F_REPLACE flag is required"); return err; } mark = xfrm_mark_get(attrs, &m); x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family); if (x == NULL) return -ESRCH; if (x->km.state != XFRM_STATE_VALID) { NL_SET_ERR_MSG(extack, "SA must be in VALID state"); goto out; } err = xfrm_replay_verify_len(x->replay_esn, re, extack); if (err) goto out; spin_lock_bh(&x->lock); xfrm_update_ae_params(x, attrs, 1); spin_unlock_bh(&x->lock); c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; c.portid = nlh->nlmsg_pid; c.data.aevent = XFRM_AE_CU; km_state_notify(x, &c); err = 0; out: xfrm_state_put(x); return err; } static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct km_event c; u8 type = XFRM_POLICY_TYPE_MAIN; int err; err = copy_from_user_policy_type(&type, attrs, extack); if (err) return err; err = xfrm_policy_flush(net, type, true); if (err) { if (err == -ESRCH) /* empty table */ return 0; return err; } c.data.type = type; c.event = nlh->nlmsg_type; c.seq = nlh->nlmsg_seq; c.portid = nlh->nlmsg_pid; c.net = net; km_policy_notify(NULL, 0, &c); return 0; } static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_policy *xp; struct xfrm_user_polexpire *up = nlmsg_data(nlh); struct xfrm_userpolicy_info *p = &up->pol; u8 type = XFRM_POLICY_TYPE_MAIN; int err = -ENOENT; struct xfrm_mark m; u32 if_id = 0; err = copy_from_user_policy_type(&type, attrs, extack); if (err) return err; err = verify_policy_dir(p->dir, extack); if (err) return err; if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); xfrm_mark_get(attrs, &m); if (p->index) xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index, 0, &err); else { struct nlattr *rt = attrs[XFRMA_SEC_CTX]; struct xfrm_sec_ctx *ctx; err = verify_sec_ctx_len(attrs, extack); if (err) return err; ctx = NULL; if (rt) { struct xfrm_user_sec_ctx *uctx = nla_data(rt); err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL); if (err) return err; } xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir, &p->sel, ctx, 0, &err); security_xfrm_policy_free(ctx); } if (xp == NULL) return -ENOENT; if (unlikely(xp->walk.dead)) goto out; err = 0; if (up->hard) { xfrm_policy_delete(xp, p->dir); xfrm_audit_policy_delete(xp, 1, true); } km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid); out: xfrm_pol_put(xp); return err; } static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_state *x; int err; struct xfrm_user_expire *ue = nlmsg_data(nlh); struct xfrm_usersa_info *p = &ue->state; struct xfrm_mark m; u32 mark = xfrm_mark_get(attrs, &m); x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family); err = -ENOENT; if (x == NULL) return err; spin_lock_bh(&x->lock); err = -EINVAL; if (x->km.state != XFRM_STATE_VALID) { NL_SET_ERR_MSG(extack, "SA must be in VALID state"); goto out; } km_state_expired(x, ue->hard, nlh->nlmsg_pid); if (ue->hard) { __xfrm_state_delete(x); 
xfrm_audit_state_delete(x, 1, true); } err = 0; out: spin_unlock_bh(&x->lock); xfrm_state_put(x); return err; } static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct xfrm_policy *xp; struct xfrm_user_tmpl *ut; int i; struct nlattr *rt = attrs[XFRMA_TMPL]; struct xfrm_mark mark; struct xfrm_user_acquire *ua = nlmsg_data(nlh); struct xfrm_state *x = xfrm_state_alloc(net); int err = -ENOMEM; if (!x) goto nomem; xfrm_mark_get(attrs, &mark); if (attrs[XFRMA_SA_PCPU]) { x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]); err = -EINVAL; if (x->pcpu_num >= num_possible_cpus()) goto free_state; } err = verify_newpolicy_info(&ua->policy, extack); if (err) goto free_state; err = verify_sec_ctx_len(attrs, extack); if (err) goto free_state; /* build an XP */ xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack); if (!xp) goto free_state; memcpy(&x->id, &ua->id, sizeof(ua->id)); memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr)); memcpy(&x->sel, &ua->sel, sizeof(ua->sel)); xp->mark.m = x->mark.m = mark.m; xp->mark.v = x->mark.v = mark.v; ut = nla_data(rt); /* extract the templates and for each call km_key */ for (i = 0; i < xp->xfrm_nr; i++, ut++) { struct xfrm_tmpl *t = &xp->xfrm_vec[i]; memcpy(&x->id, &t->id, sizeof(x->id)); x->props.mode = t->mode; x->props.reqid = t->reqid; x->props.family = ut->family; t->aalgos = ua->aalgos; t->ealgos = ua->ealgos; t->calgos = ua->calgos; err = km_query(x, t, xp); } xfrm_state_free(x); kfree(xp); return 0; free_state: xfrm_state_free(x); nomem: return err; } #ifdef CONFIG_XFRM_MIGRATE static int copy_from_user_migrate(struct xfrm_migrate *ma, struct xfrm_kmaddress *k, struct nlattr **attrs, int *num, struct netlink_ext_ack *extack) { struct nlattr *rt = attrs[XFRMA_MIGRATE]; struct xfrm_user_migrate *um; int i, num_migrate; if (k != NULL) { struct xfrm_user_kmaddress *uk; uk = nla_data(attrs[XFRMA_KMADDRESS]); memcpy(&k->local, &uk->local, sizeof(k->local)); memcpy(&k->remote, &uk->remote, sizeof(k->remote)); k->family = uk->family; k->reserved = uk->reserved; } um = nla_data(rt); num_migrate = nla_len(rt) / sizeof(*um); if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) { NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)"); return -EINVAL; } for (i = 0; i < num_migrate; i++, um++, ma++) { memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr)); memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr)); memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr)); memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr)); ma->proto = um->proto; ma->mode = um->mode; ma->reqid = um->reqid; ma->old_family = um->old_family; ma->new_family = um->new_family; } *num = i; return 0; } static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { struct xfrm_userpolicy_id *pi = nlmsg_data(nlh); struct xfrm_migrate m[XFRM_MAX_DEPTH]; struct xfrm_kmaddress km, *kmp; u8 type; int err; int n = 0; struct net *net = sock_net(skb->sk); struct xfrm_encap_tmpl *encap = NULL; struct xfrm_user_offload *xuo = NULL; u32 if_id = 0; if (!attrs[XFRMA_MIGRATE]) { NL_SET_ERR_MSG(extack, "Missing required MIGRATE attribute"); return -EINVAL; } kmp = attrs[XFRMA_KMADDRESS] ? 
&km : NULL; err = copy_from_user_policy_type(&type, attrs, extack); if (err) return err; err = copy_from_user_migrate(m, kmp, attrs, &n, extack); if (err) return err; if (!n) return 0; if (attrs[XFRMA_ENCAP]) { encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), sizeof(*encap), GFP_KERNEL); if (!encap) return -ENOMEM; } if (attrs[XFRMA_IF_ID]) if_id = nla_get_u32(attrs[XFRMA_IF_ID]); if (attrs[XFRMA_OFFLOAD_DEV]) { xuo = kmemdup(nla_data(attrs[XFRMA_OFFLOAD_DEV]), sizeof(*xuo), GFP_KERNEL); if (!xuo) { err = -ENOMEM; goto error; } } err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id, extack, xuo); error: kfree(encap); kfree(xuo); return err; } #else static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr **attrs, struct netlink_ext_ack *extack) { return -ENOPROTOOPT; } #endif #ifdef CONFIG_XFRM_MIGRATE static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb) { struct xfrm_user_migrate um; memset(&um, 0, sizeof(um)); um.proto = m->proto; um.mode = m->mode; um.reqid = m->reqid; um.old_family = m->old_family; memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr)); memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr)); um.new_family = m->new_family; memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr)); memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr)); return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um); } static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb) { struct xfrm_user_kmaddress uk; memset(&uk, 0, sizeof(uk)); uk.family = k->family; uk.reserved = k->reserved; memcpy(&uk.local, &k->local, sizeof(uk.local)); memcpy(&uk.remote, &k->remote, sizeof(uk.remote)); return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk); } static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma, int with_encp) { return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id)) + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0) + (with_encp ? 
nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0) + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate) + userpolicy_type_attrsize(); } static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m, int num_migrate, const struct xfrm_kmaddress *k, const struct xfrm_selector *sel, const struct xfrm_encap_tmpl *encap, u8 dir, u8 type) { const struct xfrm_migrate *mp; struct xfrm_userpolicy_id *pol_id; struct nlmsghdr *nlh; int i, err; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0); if (nlh == NULL) return -EMSGSIZE; pol_id = nlmsg_data(nlh); /* copy data from selector, dir, and type to the pol_id */ memset(pol_id, 0, sizeof(*pol_id)); memcpy(&pol_id->sel, sel, sizeof(pol_id->sel)); pol_id->dir = dir; if (k != NULL) { err = copy_to_user_kmaddress(k, skb); if (err) goto out_cancel; } if (encap) { err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap); if (err) goto out_cancel; } err = copy_to_user_policy_type(type, skb); if (err) goto out_cancel; for (i = 0, mp = m ; i < num_migrate; i++, mp++) { err = copy_to_user_migrate(mp, skb); if (err) goto out_cancel; } nlmsg_end(skb, nlh); return 0; out_cancel: nlmsg_cancel(skb, nlh); return err; } static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, const struct xfrm_migrate *m, int num_migrate, const struct xfrm_kmaddress *k, const struct xfrm_encap_tmpl *encap) { struct net *net = &init_net; struct sk_buff *skb; int err; skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; /* build migrate */ err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type); BUG_ON(err < 0); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE); } #else static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, const struct xfrm_migrate *m, int num_migrate, const struct xfrm_kmaddress *k, const struct xfrm_encap_tmpl *encap) { return -ENOPROTOOPT; } #endif #define XMSGSIZE(type) sizeof(struct type) const int xfrm_msg_min[XFRM_NR_MSGTYPES] = { [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id), [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info), [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire), [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire), [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info), [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info), [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire), [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush), [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0, [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id), [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report), [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id), [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32), [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32), [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32), [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default), [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default), }; 
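/*
 * The table above gives the minimum fixed-header payload for each message
 * type, indexed by "type - XFRM_MSG_BASE". xfrm_user_rcv_msg() passes
 * xfrm_msg_min[type] as the header length to nlmsg_parse_deprecated(), so a
 * request shorter than its fixed struct is rejected before any attribute is
 * looked at. As a rough userspace sketch (the request layout below is an
 * illustrative assumption, not lifted from any particular tool), a GETSA
 * request is typically sized like this:
 *
 *	struct {
 *		struct nlmsghdr n;
 *		struct xfrm_usersa_id id;	// fixed GETSA header
 *	} req = {
 *		.n.nlmsg_len	= NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)),
 *		.n.nlmsg_type	= XFRM_MSG_GETSA,
 *		.n.nlmsg_flags	= NLM_F_REQUEST,
 *	};
 *
 * Anything with nlmsg_len below NLMSG_LENGTH(sizeof(struct xfrm_usersa_id))
 * never reaches xfrm_get_sa().
 */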
EXPORT_SYMBOL_GPL(xfrm_msg_min); #undef XMSGSIZE const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_UNSPEC] = { .strict_start_type = XFRMA_SA_DIR }, [XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)}, [XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)}, [XFRMA_LASTUSED] = { .type = NLA_U64}, [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)}, [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) }, [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 }, [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) }, [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) }, [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)}, [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) }, [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) }, [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) }, [XFRMA_TFCPAD] = { .type = NLA_U32 }, [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) }, [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 }, [XFRMA_PROTO] = { .type = NLA_U8 }, [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) }, [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) }, [XFRMA_SET_MARK] = { .type = NLA_U32 }, [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, [XFRMA_IF_ID] = { .type = NLA_U32 }, [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, [XFRMA_SA_DIR] = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT), [XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 }, [XFRMA_SA_PCPU] = { .type = NLA_U32 }, [XFRMA_IPTFS_DROP_TIME] = { .type = NLA_U32 }, [XFRMA_IPTFS_REORDER_WINDOW] = { .type = NLA_U16 }, [XFRMA_IPTFS_DONT_FRAG] = { .type = NLA_FLAG }, [XFRMA_IPTFS_INIT_DELAY] = { .type = NLA_U32 }, [XFRMA_IPTFS_MAX_QSIZE] = { .type = NLA_U32 }, [XFRMA_IPTFS_PKT_SIZE] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(xfrma_policy); static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { [XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, [XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) }, }; static const struct xfrm_link { int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **, struct netlink_ext_ack *); int (*start)(struct netlink_callback *); int (*dump)(struct sk_buff *, struct netlink_callback *); int (*done)(struct netlink_callback *); const struct nla_policy *nla_pol; int nla_max; } xfrm_dispatch[XFRM_NR_MSGTYPES] = { [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa }, [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa, .dump = xfrm_dump_sa, .done = xfrm_dump_sa_done }, [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, .start = xfrm_dump_policy_start, .dump = xfrm_dump_policy, .done = xfrm_dump_policy_done }, [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] 
= { .doit = xfrm_add_acquire }, [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire }, [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa }, [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire}, [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa }, [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy }, [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae }, [XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae }, [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate }, [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo }, [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo, .nla_pol = xfrma_spd_policy, .nla_max = XFRMA_SPD_MAX }, [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo }, [XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_set_default }, [XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_get_default }, }; static int xfrm_reject_unused_attr(int type, struct nlattr **attrs, struct netlink_ext_ack *extack) { if (attrs[XFRMA_SA_DIR]) { switch (type) { case XFRM_MSG_NEWSA: case XFRM_MSG_UPDSA: case XFRM_MSG_ALLOCSPI: break; default: NL_SET_ERR_MSG(extack, "Invalid attribute SA_DIR"); return -EINVAL; } } if (attrs[XFRMA_SA_PCPU]) { switch (type) { case XFRM_MSG_NEWSA: case XFRM_MSG_UPDSA: case XFRM_MSG_ALLOCSPI: case XFRM_MSG_ACQUIRE: break; default: NL_SET_ERR_MSG(extack, "Invalid attribute SA_PCPU"); return -EINVAL; } } return 0; } static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct net *net = sock_net(skb->sk); struct nlattr *attrs[XFRMA_MAX+1]; const struct xfrm_link *link; struct nlmsghdr *nlh64 = NULL; int type, err; type = nlh->nlmsg_type; if (type > XFRM_MSG_MAX) return -EINVAL; type -= XFRM_MSG_BASE; link = &xfrm_dispatch[type]; /* All operations require privileges, even GET */ if (!netlink_net_capable(skb, CAP_NET_ADMIN)) return -EPERM; if (in_compat_syscall()) { struct xfrm_translator *xtr = xfrm_get_translator(); if (!xtr) return -EOPNOTSUPP; nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max, link->nla_pol, extack); xfrm_put_translator(xtr); if (IS_ERR(nlh64)) return PTR_ERR(nlh64); if (nlh64) nlh = nlh64; } if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && (nlh->nlmsg_flags & NLM_F_DUMP)) { struct netlink_dump_control c = { .start = link->start, .dump = link->dump, .done = link->done, }; if (link->dump == NULL) { err = -EINVAL; goto err; } err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c); goto err; } err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs, link->nla_max ? : XFRMA_MAX, link->nla_pol ? : xfrma_policy, extack); if (err < 0) goto err; if (!link->nla_pol || link->nla_pol == xfrma_policy) { err = xfrm_reject_unused_attr((type + XFRM_MSG_BASE), attrs, extack); if (err < 0) goto err; } if (link->doit == NULL) { err = -EINVAL; goto err; } err = link->doit(skb, nlh, attrs, extack); /* We need to free skb allocated in xfrm_alloc_compat() before * returning from this function, because consume_skb() won't take * care of frag_list since netlink destructor sets * sbk->head to NULL. 
(see netlink_skb_destructor()) */ if (skb_has_frag_list(skb)) { kfree_skb(skb_shinfo(skb)->frag_list); skb_shinfo(skb)->frag_list = NULL; } err: kvfree(nlh64); return err; } static void xfrm_netlink_rcv(struct sk_buff *skb) { struct net *net = sock_net(skb->sk); mutex_lock(&net->xfrm.xfrm_cfg_mutex); netlink_rcv_skb(skb, &xfrm_user_rcv_msg); mutex_unlock(&net->xfrm.xfrm_cfg_mutex); } static inline unsigned int xfrm_expire_msgsize(void) { return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)) + nla_total_size(sizeof(struct xfrm_mark)) + nla_total_size(sizeof_field(struct xfrm_state, dir)) + nla_total_size(4); /* XFRMA_SA_PCPU */ } static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c) { struct xfrm_user_expire *ue; struct nlmsghdr *nlh; int err; nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0); if (nlh == NULL) return -EMSGSIZE; ue = nlmsg_data(nlh); copy_to_user_state(x, &ue->state); ue->hard = (c->data.hard != 0) ? 1 : 0; /* clear the padding bytes */ memset_after(ue, 0, hard); err = xfrm_mark_put(skb, &x->mark); if (err) return err; err = xfrm_if_id_put(skb, x->if_id); if (err) return err; if (x->pcpu_num != UINT_MAX) { err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); if (err) return err; } if (x->dir) { err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir); if (err) return err; } nlmsg_end(skb, nlh); return 0; } static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c) { struct net *net = xs_net(x); struct sk_buff *skb; skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; if (build_expire(skb, x, c) < 0) { kfree_skb(skb); return -EMSGSIZE; } return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); } static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c) { struct net *net = xs_net(x); struct sk_buff *skb; int err; skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; err = build_aevent(skb, x, c); BUG_ON(err < 0); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS); } static int xfrm_notify_sa_flush(const struct km_event *c) { struct net *net = c->net; struct xfrm_usersa_flush *p; struct nlmsghdr *nlh; struct sk_buff *skb; int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush)); skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0); if (nlh == NULL) { kfree_skb(skb); return -EMSGSIZE; } p = nlmsg_data(nlh); p->proto = c->data.proto; nlmsg_end(skb, nlh); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA); } static inline unsigned int xfrm_sa_len(struct xfrm_state *x) { unsigned int l = 0; if (x->aead) l += nla_total_size(aead_len(x->aead)); if (x->aalg) { l += nla_total_size(sizeof(struct xfrm_algo) + (x->aalg->alg_key_len + 7) / 8); l += nla_total_size(xfrm_alg_auth_len(x->aalg)); } if (x->ealg) l += nla_total_size(xfrm_alg_len(x->ealg)); if (x->calg) l += nla_total_size(sizeof(*x->calg)); if (x->encap) l += nla_total_size(sizeof(*x->encap)); if (x->tfcpad) l += nla_total_size(sizeof(x->tfcpad)); if (x->replay_esn) l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn)); else l += nla_total_size(sizeof(struct xfrm_replay_state)); if (x->security) l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) + x->security->ctx_len); if (x->coaddr) l += nla_total_size(sizeof(*x->coaddr)); if (x->props.extra_flags) l += nla_total_size(sizeof(x->props.extra_flags)); if (x->xso.dev) l += nla_total_size(sizeof(struct xfrm_user_offload)); 
if (x->props.smark.v | x->props.smark.m) { l += nla_total_size(sizeof(x->props.smark.v)); l += nla_total_size(sizeof(x->props.smark.m)); } if (x->if_id) l += nla_total_size(sizeof(x->if_id)); if (x->pcpu_num) l += nla_total_size(sizeof(x->pcpu_num)); /* Must count x->lastused as it may become non-zero behind our back. */ l += nla_total_size_64bit(sizeof(u64)); if (x->mapping_maxage) l += nla_total_size(sizeof(x->mapping_maxage)); if (x->dir) l += nla_total_size(sizeof(x->dir)); if (x->nat_keepalive_interval) l += nla_total_size(sizeof(x->nat_keepalive_interval)); if (x->mode_cbs && x->mode_cbs->sa_len) l += x->mode_cbs->sa_len(x); return l; } static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c) { struct net *net = xs_net(x); struct xfrm_usersa_info *p; struct xfrm_usersa_id *id; struct nlmsghdr *nlh; struct sk_buff *skb; unsigned int len = xfrm_sa_len(x); unsigned int headlen; int err; headlen = sizeof(*p); if (c->event == XFRM_MSG_DELSA) { len += nla_total_size(headlen); headlen = sizeof(*id); len += nla_total_size(sizeof(struct xfrm_mark)); } len += NLMSG_ALIGN(headlen); skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; p = nlmsg_data(nlh); if (c->event == XFRM_MSG_DELSA) { struct nlattr *attr; id = nlmsg_data(nlh); memset(id, 0, sizeof(*id)); memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr)); id->spi = x->id.spi; id->family = x->props.family; id->proto = x->id.proto; attr = nla_reserve(skb, XFRMA_SA, sizeof(*p)); err = -EMSGSIZE; if (attr == NULL) goto out_free_skb; p = nla_data(attr); } err = copy_to_user_state_extra(x, p, skb); if (err) goto out_free_skb; nlmsg_end(skb, nlh); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA); out_free_skb: kfree_skb(skb); return err; } static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c) { switch (c->event) { case XFRM_MSG_EXPIRE: return xfrm_exp_state_notify(x, c); case XFRM_MSG_NEWAE: return xfrm_aevent_state_notify(x, c); case XFRM_MSG_DELSA: case XFRM_MSG_UPDSA: case XFRM_MSG_NEWSA: return xfrm_notify_sa(x, c); case XFRM_MSG_FLUSHSA: return xfrm_notify_sa_flush(c); default: printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n", c->event); break; } return 0; } static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x, struct xfrm_policy *xp) { return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire)) + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) + nla_total_size(sizeof(struct xfrm_mark)) + nla_total_size(xfrm_user_sec_ctx_size(x->security)) + nla_total_size(4) /* XFRMA_SA_PCPU */ + userpolicy_type_attrsize(); } static int build_acquire(struct sk_buff *skb, struct xfrm_state *x, struct xfrm_tmpl *xt, struct xfrm_policy *xp) { __u32 seq = xfrm_get_acqseq(); struct xfrm_user_acquire *ua; struct nlmsghdr *nlh; int err; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0); if (nlh == NULL) return -EMSGSIZE; ua = nlmsg_data(nlh); memcpy(&ua->id, &x->id, sizeof(ua->id)); memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr)); memcpy(&ua->sel, &x->sel, sizeof(ua->sel)); copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT); ua->aalgos = xt->aalgos; ua->ealgos = xt->ealgos; ua->calgos = xt->calgos; ua->seq = x->km.seq = seq; err = copy_to_user_tmpl(xp, skb); if (!err) err = copy_to_user_state_sec_ctx(x, skb); if (!err) err = copy_to_user_policy_type(xp->type, skb); if (!err) err = xfrm_mark_put(skb, &xp->mark); if (!err) err = 
xfrm_if_id_put(skb, xp->if_id); if (!err && xp->xdo.dev) err = copy_user_offload(&xp->xdo, skb); if (!err && x->pcpu_num != UINT_MAX) err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num); if (err) { nlmsg_cancel(skb, nlh); return err; } nlmsg_end(skb, nlh); return 0; } static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt, struct xfrm_policy *xp) { struct net *net = xs_net(x); struct sk_buff *skb; int err; skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; err = build_acquire(skb, x, xt, xp); BUG_ON(err < 0); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE); } /* User gives us xfrm_user_policy_info followed by an array of 0 * or more templates. */ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt, u8 *data, int len, int *dir) { struct net *net = sock_net(sk); struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data; struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1); struct xfrm_policy *xp; int nr; switch (sk->sk_family) { case AF_INET: if (opt != IP_XFRM_POLICY) { *dir = -EOPNOTSUPP; return NULL; } break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: if (opt != IPV6_XFRM_POLICY) { *dir = -EOPNOTSUPP; return NULL; } break; #endif default: *dir = -EINVAL; return NULL; } *dir = -EINVAL; if (len < sizeof(*p) || verify_newpolicy_info(p, NULL)) return NULL; nr = ((len - sizeof(*p)) / sizeof(*ut)); if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL)) return NULL; if (p->dir > XFRM_POLICY_OUT) return NULL; xp = xfrm_policy_alloc(net, GFP_ATOMIC); if (xp == NULL) { *dir = -ENOBUFS; return NULL; } copy_from_user_policy(xp, p); xp->type = XFRM_POLICY_TYPE_MAIN; copy_templates(xp, ut, nr); *dir = p->dir; return xp; } static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp) { return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire)) + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr) + nla_total_size(xfrm_user_sec_ctx_size(xp->security)) + nla_total_size(sizeof(struct xfrm_mark)) + userpolicy_type_attrsize(); } static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp, int dir, const struct km_event *c) { struct xfrm_user_polexpire *upe; int hard = c->data.hard; struct nlmsghdr *nlh; int err; nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0); if (nlh == NULL) return -EMSGSIZE; upe = nlmsg_data(nlh); copy_to_user_policy(xp, &upe->pol, dir); err = copy_to_user_tmpl(xp, skb); if (!err) err = copy_to_user_sec_ctx(xp, skb); if (!err) err = copy_to_user_policy_type(xp->type, skb); if (!err) err = xfrm_mark_put(skb, &xp->mark); if (!err) err = xfrm_if_id_put(skb, xp->if_id); if (!err && xp->xdo.dev) err = copy_user_offload(&xp->xdo, skb); if (err) { nlmsg_cancel(skb, nlh); return err; } upe->hard = !!hard; nlmsg_end(skb, nlh); return 0; } static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) { struct net *net = xp_net(xp); struct sk_buff *skb; int err; skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; err = build_polexpire(skb, xp, dir, c); BUG_ON(err < 0); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE); } static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c) { unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); struct net *net = xp_net(xp); struct xfrm_userpolicy_info *p; struct xfrm_userpolicy_id *id; struct nlmsghdr *nlh; struct sk_buff *skb; unsigned int headlen; int err; headlen = 
sizeof(*p); if (c->event == XFRM_MSG_DELPOLICY) { len += nla_total_size(headlen); headlen = sizeof(*id); } len += userpolicy_type_attrsize(); len += nla_total_size(sizeof(struct xfrm_mark)); len += NLMSG_ALIGN(headlen); skb = nlmsg_new(len, GFP_ATOMIC); if (skb == NULL) return -ENOMEM; nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; p = nlmsg_data(nlh); if (c->event == XFRM_MSG_DELPOLICY) { struct nlattr *attr; id = nlmsg_data(nlh); memset(id, 0, sizeof(*id)); id->dir = dir; if (c->data.byid) id->index = xp->index; else memcpy(&id->sel, &xp->selector, sizeof(id->sel)); attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p)); err = -EMSGSIZE; if (attr == NULL) goto out_free_skb; p = nla_data(attr); } copy_to_user_policy(xp, p, dir); err = copy_to_user_tmpl(xp, skb); if (!err) err = copy_to_user_policy_type(xp->type, skb); if (!err) err = xfrm_mark_put(skb, &xp->mark); if (!err) err = xfrm_if_id_put(skb, xp->if_id); if (!err && xp->xdo.dev) err = copy_user_offload(&xp->xdo, skb); if (err) goto out_free_skb; nlmsg_end(skb, nlh); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); out_free_skb: kfree_skb(skb); return err; } static int xfrm_notify_policy_flush(const struct km_event *c) { struct net *net = c->net; struct nlmsghdr *nlh; struct sk_buff *skb; int err; skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0); err = -EMSGSIZE; if (nlh == NULL) goto out_free_skb; err = copy_to_user_policy_type(c->data.type, skb); if (err) goto out_free_skb; nlmsg_end(skb, nlh); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY); out_free_skb: kfree_skb(skb); return err; } static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) { switch (c->event) { case XFRM_MSG_NEWPOLICY: case XFRM_MSG_UPDPOLICY: case XFRM_MSG_DELPOLICY: return xfrm_notify_policy(xp, dir, c); case XFRM_MSG_FLUSHPOLICY: return xfrm_notify_policy_flush(c); case XFRM_MSG_POLEXPIRE: return xfrm_exp_policy_notify(xp, dir, c); default: printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n", c->event); } return 0; } static inline unsigned int xfrm_report_msgsize(void) { return NLMSG_ALIGN(sizeof(struct xfrm_user_report)); } static int build_report(struct sk_buff *skb, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) { struct xfrm_user_report *ur; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0); if (nlh == NULL) return -EMSGSIZE; ur = nlmsg_data(nlh); ur->proto = proto; memcpy(&ur->sel, sel, sizeof(ur->sel)); if (addr) { int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr); if (err) { nlmsg_cancel(skb, nlh); return err; } } nlmsg_end(skb, nlh); return 0; } static int xfrm_send_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) { struct sk_buff *skb; int err; skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; err = build_report(skb, proto, sel, addr); BUG_ON(err < 0); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT); } static inline unsigned int xfrm_mapping_msgsize(void) { return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping)); } static int build_mapping(struct sk_buff *skb, struct xfrm_state *x, xfrm_address_t *new_saddr, __be16 new_sport) { struct xfrm_user_mapping *um; struct nlmsghdr *nlh; nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0); if (nlh == NULL) return -EMSGSIZE; um = 
nlmsg_data(nlh); memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr)); um->id.spi = x->id.spi; um->id.family = x->props.family; um->id.proto = x->id.proto; memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr)); memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr)); um->new_sport = new_sport; um->old_sport = x->encap->encap_sport; um->reqid = x->props.reqid; nlmsg_end(skb, nlh); return 0; } static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) { struct net *net = xs_net(x); struct sk_buff *skb; int err; if (x->id.proto != IPPROTO_ESP) return -EINVAL; if (!x->encap) return -EINVAL; skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC); if (skb == NULL) return -ENOMEM; err = build_mapping(skb, x, ipaddr, sport); BUG_ON(err < 0); return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING); } static bool xfrm_is_alive(const struct km_event *c) { return (bool)xfrm_acquire_is_on(c->net); } static struct xfrm_mgr netlink_mgr = { .notify = xfrm_send_state_notify, .acquire = xfrm_send_acquire, .compile_policy = xfrm_compile_policy, .notify_policy = xfrm_send_policy_notify, .report = xfrm_send_report, .migrate = xfrm_send_migrate, .new_mapping = xfrm_send_mapping, .is_alive = xfrm_is_alive, }; static int __net_init xfrm_user_net_init(struct net *net) { struct sock *nlsk; struct netlink_kernel_cfg cfg = { .groups = XFRMNLGRP_MAX, .input = xfrm_netlink_rcv, }; nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg); if (nlsk == NULL) return -ENOMEM; net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */ rcu_assign_pointer(net->xfrm.nlsk, nlsk); return 0; } static void __net_exit xfrm_user_net_pre_exit(struct net *net) { RCU_INIT_POINTER(net->xfrm.nlsk, NULL); } static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list) { struct net *net; list_for_each_entry(net, net_exit_list, exit_list) netlink_kernel_release(net->xfrm.nlsk_stash); } static struct pernet_operations xfrm_user_net_ops = { .init = xfrm_user_net_init, .pre_exit = xfrm_user_net_pre_exit, .exit_batch = xfrm_user_net_exit, }; static int __init xfrm_user_init(void) { int rv; printk(KERN_INFO "Initializing XFRM netlink socket\n"); rv = register_pernet_subsys(&xfrm_user_net_ops); if (rv < 0) return rv; xfrm_register_km(&netlink_mgr); return 0; } static void __exit xfrm_user_exit(void) { xfrm_unregister_km(&netlink_mgr); unregister_pernet_subsys(&xfrm_user_net_ops); } module_init(xfrm_user_init); module_exit(xfrm_user_exit); MODULE_DESCRIPTION("XFRM User interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM); |
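/*
 * The notify helpers above all end in xfrm_nlmsg_multicast() on one of the
 * XFRMNLGRP_* groups, and xfrm_user_net_init() creates the NETLINK_XFRM
 * kernel socket they are sent through. A minimal sketch of a userspace
 * listener for those events (the group choice and the legacy nl_groups
 * bitmask encoding are illustrative assumptions; NETLINK_ADD_MEMBERSHIP can
 * be used instead, and is required for group ids above 31):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_XFRM);
 *	struct sockaddr_nl snl = {
 *		.nl_family = AF_NETLINK,
 *		.nl_groups = (1 << (XFRMNLGRP_SA - 1)) |
 *			     (1 << (XFRMNLGRP_EXPIRE - 1)),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 *	// recv() now yields XFRM_MSG_NEWSA/DELSA/EXPIRE messages built by
 *	// xfrm_notify_sa() and build_expire() above.
 */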
// SPDX-License-Identifier: GPL-2.0+ /* * MaxLinear/Exar USB to Serial driver * * Copyright (c) 2020 Manivannan Sadhasivam <mani@kernel.org> * Copyright (c) 2021 Johan Hovold <johan@kernel.org> * * Based on the initial driver written by Patong Yang: * * https://lore.kernel.org/r/20180404070634.nhspvmxcjwfgjkcv@advantechmxl-desktop * * Copyright (c) 2018 Patong Yang <patong.mxl@gmail.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/usb.h> #include <linux/usb/cdc.h> #include <linux/usb/serial.h> struct xr_txrx_clk_mask { u16 tx; u16 rx0; u16 rx1; }; #define XR_INT_OSC_HZ 48000000U #define XR21V141X_MIN_SPEED 46U #define XR21V141X_MAX_SPEED XR_INT_OSC_HZ /* XR21V141X register blocks */ #define XR21V141X_UART_REG_BLOCK 0 #define XR21V141X_UM_REG_BLOCK 4 #define XR21V141X_UART_CUSTOM_BLOCK 0x66 /* XR21V141X UART registers */ #define XR21V141X_CLOCK_DIVISOR_0 0x04 #define XR21V141X_CLOCK_DIVISOR_1 0x05 #define XR21V141X_CLOCK_DIVISOR_2 0x06 #define XR21V141X_TX_CLOCK_MASK_0 0x07 #define XR21V141X_TX_CLOCK_MASK_1 0x08 #define XR21V141X_RX_CLOCK_MASK_0 0x09 #define XR21V141X_RX_CLOCK_MASK_1 0x0a #define XR21V141X_REG_FORMAT 0x0b /* XR21V141X UART Manager registers */ #define XR21V141X_UM_FIFO_ENABLE_REG 0x10 #define XR21V141X_UM_ENABLE_TX_FIFO 0x01 #define XR21V141X_UM_ENABLE_RX_FIFO 0x02 #define XR21V141X_UM_RX_FIFO_RESET 0x18 #define XR21V141X_UM_TX_FIFO_RESET 0x1c #define XR_UART_ENABLE_TX 0x1 #define XR_UART_ENABLE_RX 0x2 #define XR_GPIO_RI BIT(0) #define XR_GPIO_CD BIT(1) #define XR_GPIO_DSR BIT(2) #define XR_GPIO_DTR BIT(3) #define XR_GPIO_CTS BIT(4) #define XR_GPIO_RTS BIT(5) #define XR_GPIO_CLK BIT(6) #define XR_GPIO_XEN BIT(7) #define XR_GPIO_TXT BIT(8) #define XR_GPIO_RXT BIT(9) #define XR_UART_DATA_MASK GENMASK(3, 0) #define XR_UART_DATA_7 0x7 #define XR_UART_DATA_8 0x8 #define XR_UART_PARITY_MASK GENMASK(6, 4) #define XR_UART_PARITY_SHIFT 4 #define XR_UART_PARITY_NONE (0x0 << XR_UART_PARITY_SHIFT) #define XR_UART_PARITY_ODD (0x1 << XR_UART_PARITY_SHIFT) #define XR_UART_PARITY_EVEN (0x2 << XR_UART_PARITY_SHIFT) #define XR_UART_PARITY_MARK (0x3 << XR_UART_PARITY_SHIFT) #define XR_UART_PARITY_SPACE (0x4 << XR_UART_PARITY_SHIFT) #define XR_UART_STOP_MASK BIT(7) #define XR_UART_STOP_SHIFT 7 #define XR_UART_STOP_1 (0x0 << XR_UART_STOP_SHIFT) #define XR_UART_STOP_2 (0x1 << XR_UART_STOP_SHIFT) #define XR_UART_FLOW_MODE_NONE 0x0 #define XR_UART_FLOW_MODE_HW 0x1 #define XR_UART_FLOW_MODE_SW 0x2 #define XR_GPIO_MODE_SEL_MASK GENMASK(2, 0) #define XR_GPIO_MODE_SEL_RTS_CTS 0x1 #define XR_GPIO_MODE_SEL_DTR_DSR 0x2
#define XR_GPIO_MODE_SEL_RS485 0x3 #define XR_GPIO_MODE_SEL_RS485_ADDR 0x4 #define XR_GPIO_MODE_RS485_TX_H 0x8 #define XR_GPIO_MODE_TX_TOGGLE 0x100 #define XR_GPIO_MODE_RX_TOGGLE 0x200 #define XR_FIFO_RESET 0x1 #define XR_CUSTOM_DRIVER_ACTIVE 0x1 static int xr21v141x_uart_enable(struct usb_serial_port *port); static int xr21v141x_uart_disable(struct usb_serial_port *port); static int xr21v141x_fifo_reset(struct usb_serial_port *port); static void xr21v141x_set_line_settings(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); struct xr_type { int reg_width; u8 reg_recipient; u8 set_reg; u8 get_reg; u16 uart_enable; u16 flow_control; u16 xon_char; u16 xoff_char; u16 tx_break; u16 gpio_mode; u16 gpio_direction; u16 gpio_set; u16 gpio_clear; u16 gpio_status; u16 tx_fifo_reset; u16 rx_fifo_reset; u16 custom_driver; bool have_5_6_bit_mode; bool have_xmit_toggle; int (*enable)(struct usb_serial_port *port); int (*disable)(struct usb_serial_port *port); int (*fifo_reset)(struct usb_serial_port *port); void (*set_line_settings)(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios); }; enum xr_type_id { XR21V141X, XR21B142X, XR21B1411, XR2280X, XR_TYPE_COUNT, }; static const struct xr_type xr_types[] = { [XR21V141X] = { .reg_width = 8, .reg_recipient = USB_RECIP_DEVICE, .set_reg = 0x00, .get_reg = 0x01, .uart_enable = 0x03, .flow_control = 0x0c, .xon_char = 0x10, .xoff_char = 0x11, .tx_break = 0x14, .gpio_mode = 0x1a, .gpio_direction = 0x1b, .gpio_set = 0x1d, .gpio_clear = 0x1e, .gpio_status = 0x1f, .enable = xr21v141x_uart_enable, .disable = xr21v141x_uart_disable, .fifo_reset = xr21v141x_fifo_reset, .set_line_settings = xr21v141x_set_line_settings, }, [XR21B142X] = { .reg_width = 16, .reg_recipient = USB_RECIP_INTERFACE, .set_reg = 0x00, .get_reg = 0x00, .uart_enable = 0x00, .flow_control = 0x06, .xon_char = 0x07, .xoff_char = 0x08, .tx_break = 0x0a, .gpio_mode = 0x0c, .gpio_direction = 0x0d, .gpio_set = 0x0e, .gpio_clear = 0x0f, .gpio_status = 0x10, .tx_fifo_reset = 0x40, .rx_fifo_reset = 0x43, .custom_driver = 0x60, .have_5_6_bit_mode = true, .have_xmit_toggle = true, }, [XR21B1411] = { .reg_width = 12, .reg_recipient = USB_RECIP_DEVICE, .set_reg = 0x00, .get_reg = 0x01, .uart_enable = 0xc00, .flow_control = 0xc06, .xon_char = 0xc07, .xoff_char = 0xc08, .tx_break = 0xc0a, .gpio_mode = 0xc0c, .gpio_direction = 0xc0d, .gpio_set = 0xc0e, .gpio_clear = 0xc0f, .gpio_status = 0xc10, .tx_fifo_reset = 0xc80, .rx_fifo_reset = 0xcc0, .custom_driver = 0x20d, }, [XR2280X] = { .reg_width = 16, .reg_recipient = USB_RECIP_DEVICE, .set_reg = 0x05, .get_reg = 0x05, .uart_enable = 0x40, .flow_control = 0x46, .xon_char = 0x47, .xoff_char = 0x48, .tx_break = 0x4a, .gpio_mode = 0x4c, .gpio_direction = 0x4d, .gpio_set = 0x4e, .gpio_clear = 0x4f, .gpio_status = 0x50, .tx_fifo_reset = 0x60, .rx_fifo_reset = 0x63, .custom_driver = 0x81, }, }; struct xr_data { const struct xr_type *type; u8 channel; /* zero-based index or interface number */ struct serial_rs485 rs485; }; static int xr_set_reg(struct usb_serial_port *port, u8 channel, u16 reg, u16 val) { struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; struct usb_serial *serial = port->serial; int ret; ret = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), type->set_reg, USB_DIR_OUT | USB_TYPE_VENDOR | type->reg_recipient, val, (channel << 8) | reg, NULL, 0, USB_CTRL_SET_TIMEOUT); if (ret < 0) { dev_err(&port->dev, "Failed to set reg 
0x%02x: %d\n", reg, ret); return ret; } return 0; } static int xr_get_reg(struct usb_serial_port *port, u8 channel, u16 reg, u16 *val) { struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; struct usb_serial *serial = port->serial; u8 *dmabuf; int ret, len; if (type->reg_width == 8) len = 1; else len = 2; dmabuf = kmalloc(len, GFP_KERNEL); if (!dmabuf) return -ENOMEM; ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), type->get_reg, USB_DIR_IN | USB_TYPE_VENDOR | type->reg_recipient, 0, (channel << 8) | reg, dmabuf, len, USB_CTRL_GET_TIMEOUT); if (ret == len) { if (len == 2) *val = le16_to_cpup((__le16 *)dmabuf); else *val = *dmabuf; ret = 0; } else { dev_err(&port->dev, "Failed to get reg 0x%02x: %d\n", reg, ret); if (ret >= 0) ret = -EIO; } kfree(dmabuf); return ret; } static int xr_set_reg_uart(struct usb_serial_port *port, u16 reg, u16 val) { struct xr_data *data = usb_get_serial_port_data(port); return xr_set_reg(port, data->channel, reg, val); } static int xr_get_reg_uart(struct usb_serial_port *port, u16 reg, u16 *val) { struct xr_data *data = usb_get_serial_port_data(port); return xr_get_reg(port, data->channel, reg, val); } static int xr_set_reg_um(struct usb_serial_port *port, u8 reg_base, u8 val) { struct xr_data *data = usb_get_serial_port_data(port); u8 reg; reg = reg_base + data->channel; return xr_set_reg(port, XR21V141X_UM_REG_BLOCK, reg, val); } static int __xr_uart_enable(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); return xr_set_reg_uart(port, data->type->uart_enable, XR_UART_ENABLE_TX | XR_UART_ENABLE_RX); } static int __xr_uart_disable(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); return xr_set_reg_uart(port, data->type->uart_enable, 0); } /* * According to datasheet, below is the recommended sequence for enabling UART * module in XR21V141X: * * Enable Tx FIFO * Enable Tx and Rx * Enable Rx FIFO */ static int xr21v141x_uart_enable(struct usb_serial_port *port) { int ret; ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, XR21V141X_UM_ENABLE_TX_FIFO); if (ret) return ret; ret = __xr_uart_enable(port); if (ret) return ret; ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, XR21V141X_UM_ENABLE_TX_FIFO | XR21V141X_UM_ENABLE_RX_FIFO); if (ret) __xr_uart_disable(port); return ret; } static int xr21v141x_uart_disable(struct usb_serial_port *port) { int ret; ret = __xr_uart_disable(port); if (ret) return ret; ret = xr_set_reg_um(port, XR21V141X_UM_FIFO_ENABLE_REG, 0); return ret; } static int xr_uart_enable(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); if (data->type->enable) return data->type->enable(port); return __xr_uart_enable(port); } static int xr_uart_disable(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); if (data->type->disable) return data->type->disable(port); return __xr_uart_disable(port); } static int xr21v141x_fifo_reset(struct usb_serial_port *port) { int ret; ret = xr_set_reg_um(port, XR21V141X_UM_TX_FIFO_RESET, XR_FIFO_RESET); if (ret) return ret; ret = xr_set_reg_um(port, XR21V141X_UM_RX_FIFO_RESET, XR_FIFO_RESET); if (ret) return ret; return 0; } static int xr_fifo_reset(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); int ret; if (data->type->fifo_reset) return data->type->fifo_reset(port); ret = xr_set_reg_uart(port, data->type->tx_fifo_reset, XR_FIFO_RESET); if (ret) return ret; ret = 
xr_set_reg_uart(port, data->type->rx_fifo_reset, XR_FIFO_RESET); if (ret) return ret; return 0; } static int xr_tiocmget(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct xr_data *data = usb_get_serial_port_data(port); u16 status; int ret; ret = xr_get_reg_uart(port, data->type->gpio_status, &status); if (ret) return ret; /* * Modem control pins are active low, so reading '0' means it is active * and '1' means not active. */ ret = ((status & XR_GPIO_DTR) ? 0 : TIOCM_DTR) | ((status & XR_GPIO_RTS) ? 0 : TIOCM_RTS) | ((status & XR_GPIO_CTS) ? 0 : TIOCM_CTS) | ((status & XR_GPIO_DSR) ? 0 : TIOCM_DSR) | ((status & XR_GPIO_RI) ? 0 : TIOCM_RI) | ((status & XR_GPIO_CD) ? 0 : TIOCM_CD); return ret; } static int xr_tiocmset_port(struct usb_serial_port *port, unsigned int set, unsigned int clear) { struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; u16 gpio_set = 0; u16 gpio_clr = 0; int ret = 0; /* Modem control pins are active low, so set & clr are swapped */ if (set & TIOCM_RTS) gpio_clr |= XR_GPIO_RTS; if (set & TIOCM_DTR) gpio_clr |= XR_GPIO_DTR; if (clear & TIOCM_RTS) gpio_set |= XR_GPIO_RTS; if (clear & TIOCM_DTR) gpio_set |= XR_GPIO_DTR; /* Writing '0' to gpio_{set/clr} bits has no effect, so no need to do */ if (gpio_clr) ret = xr_set_reg_uart(port, type->gpio_clear, gpio_clr); if (gpio_set) ret = xr_set_reg_uart(port, type->gpio_set, gpio_set); return ret; } static int xr_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; return xr_tiocmset_port(port, set, clear); } static void xr_dtr_rts(struct usb_serial_port *port, int on) { if (on) xr_tiocmset_port(port, TIOCM_DTR | TIOCM_RTS, 0); else xr_tiocmset_port(port, 0, TIOCM_DTR | TIOCM_RTS); } static int xr_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; u16 state; if (break_state == 0) state = 0; else state = GENMASK(type->reg_width - 1, 0); dev_dbg(&port->dev, "Turning break %s\n", state == 0 ? 
"off" : "on"); return xr_set_reg_uart(port, type->tx_break, state); } /* Tx and Rx clock mask values obtained from section 3.3.4 of datasheet */ static const struct xr_txrx_clk_mask xr21v141x_txrx_clk_masks[] = { { 0x000, 0x000, 0x000 }, { 0x000, 0x000, 0x000 }, { 0x100, 0x000, 0x100 }, { 0x020, 0x400, 0x020 }, { 0x010, 0x100, 0x010 }, { 0x208, 0x040, 0x208 }, { 0x104, 0x820, 0x108 }, { 0x844, 0x210, 0x884 }, { 0x444, 0x110, 0x444 }, { 0x122, 0x888, 0x224 }, { 0x912, 0x448, 0x924 }, { 0x492, 0x248, 0x492 }, { 0x252, 0x928, 0x292 }, { 0x94a, 0x4a4, 0xa52 }, { 0x52a, 0xaa4, 0x54a }, { 0xaaa, 0x954, 0x4aa }, { 0xaaa, 0x554, 0xaaa }, { 0x555, 0xad4, 0x5aa }, { 0xb55, 0xab4, 0x55a }, { 0x6b5, 0x5ac, 0xb56 }, { 0x5b5, 0xd6c, 0x6d6 }, { 0xb6d, 0xb6a, 0xdb6 }, { 0x76d, 0x6da, 0xbb6 }, { 0xedd, 0xdda, 0x76e }, { 0xddd, 0xbba, 0xeee }, { 0x7bb, 0xf7a, 0xdde }, { 0xf7b, 0xef6, 0x7de }, { 0xdf7, 0xbf6, 0xf7e }, { 0x7f7, 0xfee, 0xefe }, { 0xfdf, 0xfbe, 0x7fe }, { 0xf7f, 0xefe, 0xffe }, { 0xfff, 0xffe, 0xffd }, }; static int xr21v141x_set_baudrate(struct tty_struct *tty, struct usb_serial_port *port) { u32 divisor, baud, idx; u16 tx_mask, rx_mask; int ret; baud = tty->termios.c_ospeed; if (!baud) return 0; baud = clamp(baud, XR21V141X_MIN_SPEED, XR21V141X_MAX_SPEED); divisor = XR_INT_OSC_HZ / baud; idx = ((32 * XR_INT_OSC_HZ) / baud) & 0x1f; tx_mask = xr21v141x_txrx_clk_masks[idx].tx; if (divisor & 0x01) rx_mask = xr21v141x_txrx_clk_masks[idx].rx1; else rx_mask = xr21v141x_txrx_clk_masks[idx].rx0; dev_dbg(&port->dev, "Setting baud rate: %u\n", baud); /* * XR21V141X uses fractional baud rate generator with 48MHz internal * oscillator and 19-bit programmable divisor. So theoretically it can * generate most commonly used baud rates with high accuracy. */ ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_0, divisor & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_1, (divisor >> 8) & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_CLOCK_DIVISOR_2, (divisor >> 16) & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_0, tx_mask & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_TX_CLOCK_MASK_1, (tx_mask >> 8) & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_0, rx_mask & 0xff); if (ret) return ret; ret = xr_set_reg_uart(port, XR21V141X_RX_CLOCK_MASK_1, (rx_mask >> 8) & 0xff); if (ret) return ret; tty_encode_baud_rate(tty, baud, baud); return 0; } static void xr_set_flow_mode(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct xr_data *data = usb_get_serial_port_data(port); const struct xr_type *type = data->type; u16 flow, gpio_mode; bool rs485_enabled; int ret; ret = xr_get_reg_uart(port, type->gpio_mode, &gpio_mode); if (ret) return; /* * According to the datasheets, the UART needs to be disabled while * writing to the FLOW_CONTROL register (XR21V141X), or any register * but GPIO_SET, GPIO_CLEAR, TX_BREAK and ERROR_STATUS (XR21B142X). */ xr_uart_disable(port); /* Set GPIO mode for controlling the pins manually by default. 
*/ gpio_mode &= ~XR_GPIO_MODE_SEL_MASK; rs485_enabled = !!(data->rs485.flags & SER_RS485_ENABLED); if (rs485_enabled) { dev_dbg(&port->dev, "Enabling RS-485\n"); gpio_mode |= XR_GPIO_MODE_SEL_RS485; if (data->rs485.flags & SER_RS485_RTS_ON_SEND) gpio_mode &= ~XR_GPIO_MODE_RS485_TX_H; else gpio_mode |= XR_GPIO_MODE_RS485_TX_H; } if (C_CRTSCTS(tty) && C_BAUD(tty) != B0 && !rs485_enabled) { dev_dbg(&port->dev, "Enabling hardware flow ctrl\n"); gpio_mode |= XR_GPIO_MODE_SEL_RTS_CTS; flow = XR_UART_FLOW_MODE_HW; } else if (I_IXON(tty)) { u8 start_char = START_CHAR(tty); u8 stop_char = STOP_CHAR(tty); dev_dbg(&port->dev, "Enabling sw flow ctrl\n"); flow = XR_UART_FLOW_MODE_SW; xr_set_reg_uart(port, type->xon_char, start_char); xr_set_reg_uart(port, type->xoff_char, stop_char); } else { dev_dbg(&port->dev, "Disabling flow ctrl\n"); flow = XR_UART_FLOW_MODE_NONE; } xr_set_reg_uart(port, type->flow_control, flow); xr_set_reg_uart(port, type->gpio_mode, gpio_mode); xr_uart_enable(port); if (C_BAUD(tty) == B0) xr_dtr_rts(port, 0); else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) xr_dtr_rts(port, 1); } static void xr21v141x_set_line_settings(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct ktermios *termios = &tty->termios; u8 bits = 0; int ret; if (!old_termios || (tty->termios.c_ospeed != old_termios->c_ospeed)) xr21v141x_set_baudrate(tty, port); switch (C_CSIZE(tty)) { case CS5: case CS6: /* CS5 and CS6 are not supported, so just restore old setting */ termios->c_cflag &= ~CSIZE; if (old_termios) termios->c_cflag |= old_termios->c_cflag & CSIZE; else termios->c_cflag |= CS8; if (C_CSIZE(tty) == CS7) bits |= XR_UART_DATA_7; else bits |= XR_UART_DATA_8; break; case CS7: bits |= XR_UART_DATA_7; break; case CS8: default: bits |= XR_UART_DATA_8; break; } if (C_PARENB(tty)) { if (C_CMSPAR(tty)) { if (C_PARODD(tty)) bits |= XR_UART_PARITY_MARK; else bits |= XR_UART_PARITY_SPACE; } else { if (C_PARODD(tty)) bits |= XR_UART_PARITY_ODD; else bits |= XR_UART_PARITY_EVEN; } } if (C_CSTOPB(tty)) bits |= XR_UART_STOP_2; else bits |= XR_UART_STOP_1; ret = xr_set_reg_uart(port, XR21V141X_REG_FORMAT, bits); if (ret) return; } static void xr_cdc_set_line_coding(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct xr_data *data = usb_get_serial_port_data(port); struct usb_host_interface *alt = port->serial->interface->cur_altsetting; struct usb_device *udev = port->serial->dev; struct usb_cdc_line_coding *lc; int ret; lc = kzalloc(sizeof(*lc), GFP_KERNEL); if (!lc) return; if (tty->termios.c_ospeed) lc->dwDTERate = cpu_to_le32(tty->termios.c_ospeed); else lc->dwDTERate = cpu_to_le32(9600); if (C_CSTOPB(tty)) lc->bCharFormat = USB_CDC_2_STOP_BITS; else lc->bCharFormat = USB_CDC_1_STOP_BITS; if (C_PARENB(tty)) { if (C_CMSPAR(tty)) { if (C_PARODD(tty)) lc->bParityType = USB_CDC_MARK_PARITY; else lc->bParityType = USB_CDC_SPACE_PARITY; } else { if (C_PARODD(tty)) lc->bParityType = USB_CDC_ODD_PARITY; else lc->bParityType = USB_CDC_EVEN_PARITY; } } else { lc->bParityType = USB_CDC_NO_PARITY; } if (!data->type->have_5_6_bit_mode && (C_CSIZE(tty) == CS5 || C_CSIZE(tty) == CS6)) { tty->termios.c_cflag &= ~CSIZE; if (old_termios) tty->termios.c_cflag |= old_termios->c_cflag & CSIZE; else tty->termios.c_cflag |= CS8; } switch (C_CSIZE(tty)) { case CS5: lc->bDataBits = 5; break; case CS6: lc->bDataBits = 6; break; case CS7: lc->bDataBits = 7; break; case CS8: default: lc->bDataBits = 8; break; } ret = 
usb_control_msg(udev, usb_sndctrlpipe(udev, 0), USB_CDC_REQ_SET_LINE_CODING, USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, alt->desc.bInterfaceNumber, lc, sizeof(*lc), USB_CTRL_SET_TIMEOUT); if (ret < 0) dev_err(&port->dev, "Failed to set line coding: %d\n", ret); kfree(lc); } static void xr_sanitize_serial_rs485(struct serial_rs485 *rs485) { if (!(rs485->flags & SER_RS485_ENABLED)) { memset(rs485, 0, sizeof(*rs485)); return; } /* RTS always toggles after TX */ if (rs485->flags & SER_RS485_RTS_ON_SEND) rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; else rs485->flags |= SER_RS485_RTS_AFTER_SEND; /* Only the flags are implemented at the moment */ rs485->flags &= SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND; rs485->delay_rts_before_send = 0; rs485->delay_rts_after_send = 0; memset(rs485->padding, 0, sizeof(rs485->padding)); } static int xr_get_rs485_config(struct tty_struct *tty, struct serial_rs485 __user *argp) { struct usb_serial_port *port = tty->driver_data; struct xr_data *data = usb_get_serial_port_data(port); down_read(&tty->termios_rwsem); if (copy_to_user(argp, &data->rs485, sizeof(data->rs485))) { up_read(&tty->termios_rwsem); return -EFAULT; } up_read(&tty->termios_rwsem); return 0; } static int xr_set_rs485_config(struct tty_struct *tty, struct serial_rs485 __user *argp) { struct usb_serial_port *port = tty->driver_data; struct xr_data *data = usb_get_serial_port_data(port); struct serial_rs485 rs485; if (copy_from_user(&rs485, argp, sizeof(rs485))) return -EFAULT; xr_sanitize_serial_rs485(&rs485); down_write(&tty->termios_rwsem); data->rs485 = rs485; xr_set_flow_mode(tty, port, NULL); up_write(&tty->termios_rwsem); if (copy_to_user(argp, &rs485, sizeof(rs485))) return -EFAULT; return 0; } static int xr_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; switch (cmd) { case TIOCGRS485: return xr_get_rs485_config(tty, argp); case TIOCSRS485: return xr_set_rs485_config(tty, argp); } return -ENOIOCTLCMD; } static void xr_set_termios(struct tty_struct *tty, struct usb_serial_port *port, const struct ktermios *old_termios) { struct xr_data *data = usb_get_serial_port_data(port); /* * XR21V141X does not have a CUSTOM_DRIVER flag and always enters CDC * mode upon receiving CDC requests. 
*/ if (data->type->set_line_settings) data->type->set_line_settings(tty, port, old_termios); else xr_cdc_set_line_coding(tty, port, old_termios); xr_set_flow_mode(tty, port, old_termios); } static int xr_open(struct tty_struct *tty, struct usb_serial_port *port) { int ret; ret = xr_fifo_reset(port); if (ret) return ret; ret = xr_uart_enable(port); if (ret) { dev_err(&port->dev, "Failed to enable UART\n"); return ret; } /* Setup termios */ if (tty) xr_set_termios(tty, port, NULL); ret = usb_serial_generic_open(tty, port); if (ret) { xr_uart_disable(port); return ret; } return 0; } static void xr_close(struct usb_serial_port *port) { usb_serial_generic_close(port); xr_uart_disable(port); } static int xr_probe(struct usb_serial *serial, const struct usb_device_id *id) { struct usb_interface *control = serial->interface; struct usb_host_interface *alt = control->cur_altsetting; struct usb_cdc_parsed_header hdrs; struct usb_cdc_union_desc *desc; struct usb_interface *data; int ret; ret = cdc_parse_cdc_header(&hdrs, control, alt->extra, alt->extralen); if (ret < 0) return -ENODEV; desc = hdrs.usb_cdc_union_desc; if (!desc) return -ENODEV; data = usb_ifnum_to_if(serial->dev, desc->bSlaveInterface0); if (!data) return -ENODEV; ret = usb_serial_claim_interface(serial, data); if (ret) return ret; usb_set_serial_data(serial, (void *)id->driver_info); return 0; } static int xr_gpio_init(struct usb_serial_port *port, const struct xr_type *type) { u16 mask, mode; int ret; /* * Configure all pins as GPIO except for Receive and Transmit Toggle. */ mode = 0; if (type->have_xmit_toggle) mode |= XR_GPIO_MODE_RX_TOGGLE | XR_GPIO_MODE_TX_TOGGLE; ret = xr_set_reg_uart(port, type->gpio_mode, mode); if (ret) return ret; /* * Configure DTR and RTS as outputs and make sure they are deasserted * (active low), and configure RI, CD, DSR and CTS as inputs. 
*/ mask = XR_GPIO_DTR | XR_GPIO_RTS; ret = xr_set_reg_uart(port, type->gpio_direction, mask); if (ret) return ret; ret = xr_set_reg_uart(port, type->gpio_set, mask); if (ret) return ret; return 0; } static int xr_port_probe(struct usb_serial_port *port) { struct usb_interface_descriptor *desc; const struct xr_type *type; struct xr_data *data; enum xr_type_id type_id; int ret; type_id = (int)(unsigned long)usb_get_serial_data(port->serial); type = &xr_types[type_id]; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->type = type; desc = &port->serial->interface->cur_altsetting->desc; if (type_id == XR21V141X) data->channel = desc->bInterfaceNumber / 2; else data->channel = desc->bInterfaceNumber; usb_set_serial_port_data(port, data); if (type->custom_driver) { ret = xr_set_reg_uart(port, type->custom_driver, XR_CUSTOM_DRIVER_ACTIVE); if (ret) goto err_free; } ret = xr_gpio_init(port, type); if (ret) goto err_free; return 0; err_free: kfree(data); return ret; } static void xr_port_remove(struct usb_serial_port *port) { struct xr_data *data = usb_get_serial_port_data(port); kfree(data); } #define XR_DEVICE(vid, pid, type) \ USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_COMM), \ .driver_info = (type) static const struct usb_device_id id_table[] = { { XR_DEVICE(0x04e2, 0x1400, XR2280X) }, { XR_DEVICE(0x04e2, 0x1401, XR2280X) }, { XR_DEVICE(0x04e2, 0x1402, XR2280X) }, { XR_DEVICE(0x04e2, 0x1403, XR2280X) }, { XR_DEVICE(0x04e2, 0x1410, XR21V141X) }, { XR_DEVICE(0x04e2, 0x1411, XR21B1411) }, { XR_DEVICE(0x04e2, 0x1412, XR21V141X) }, { XR_DEVICE(0x04e2, 0x1414, XR21V141X) }, { XR_DEVICE(0x04e2, 0x1420, XR21B142X) }, { XR_DEVICE(0x04e2, 0x1422, XR21B142X) }, { XR_DEVICE(0x04e2, 0x1424, XR21B142X) }, { } }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_serial_driver xr_device = { .driver = { .name = "xr_serial", }, .id_table = id_table, .num_ports = 1, .probe = xr_probe, .port_probe = xr_port_probe, .port_remove = xr_port_remove, .open = xr_open, .close = xr_close, .break_ctl = xr_break_ctl, .set_termios = xr_set_termios, .tiocmget = xr_tiocmget, .tiocmset = xr_tiocmset, .ioctl = xr_ioctl, .dtr_rts = xr_dtr_rts }; static struct usb_serial_driver * const serial_drivers[] = { &xr_device, NULL }; module_usb_serial_driver(serial_drivers, id_table); MODULE_AUTHOR("Manivannan Sadhasivam <mani@kernel.org>"); MODULE_DESCRIPTION("MaxLinear/Exar USB to Serial driver"); MODULE_LICENSE("GPL"); |
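/*
 * Illustrative sketch, not part of the driver above: how the fractional
 * baud-rate generator maths in xr21v141x_set_baudrate() works out for a
 * common rate. The helper name below is hypothetical and exists only for
 * this example.
 *
 * For 115200 baud with the 48 MHz internal oscillator:
 *   divisor = 48000000 / 115200                  = 416  (416.67 truncated)
 *   idx     = ((32 * 48000000) / 115200) & 0x1f  = 13333 & 0x1f = 21
 * The divisor is even, so the rx0 clock mask of table entry 21 would be
 * used; the mask spreads the leftover 0.67 of a divisor step across 32 bit
 * times to approximate the requested rate.
 */
static void xr21v141x_divisor_example(unsigned int baud,
				      unsigned int *divisor,
				      unsigned int *idx)
{
	*divisor = 48000000U / baud;			/* integer divider */
	*idx = ((32U * 48000000U) / baud) & 0x1f;	/* 1/32 fractional step */
}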
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_RCULIST_BL_H #define _LINUX_RCULIST_BL_H /* * RCU-protected bl list version. See include/linux/list_bl.h. */ #include <linux/list_bl.h> #include <linux/rcupdate.h> static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h, struct hlist_bl_node *n) { LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != LIST_BL_LOCKMASK); rcu_assign_pointer(h->first, (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK)); } static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h) { return (struct hlist_bl_node *) ((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK); } /** * hlist_bl_del_rcu - deletes entry from hash list without re-initialization * @n: the element to delete from the hash list. * * Note: hlist_bl_unhashed() on entry does not return true after this, * the entry is in an undefined state. It is useful for RCU based * lockfree traversal. * * In particular, it means that we can not poison the forward * pointers that may still be used for walking the hash list. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_bl_add_head_rcu() * or hlist_bl_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_bl_for_each_entry(). */ static inline void hlist_bl_del_rcu(struct hlist_bl_node *n) { __hlist_bl_del(n); n->pprev = LIST_POISON2; } /** * hlist_bl_add_head_rcu * @n: the element to add to the hash list. * @h: the list to add to. * * Description: * Adds the specified element to the specified hlist_bl, * while permitting racing traversals. * * The caller must take whatever precautions are necessary * (such as holding appropriate locks) to avoid racing * with another list-mutation primitive, such as hlist_bl_add_head_rcu() * or hlist_bl_del_rcu(), running on this same list. * However, it is perfectly legal to run concurrently with * the _rcu list-traversal primitives, such as * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency * problems on Alpha CPUs. Regardless of the type of CPU, the * list-traversal primitive must be guarded by rcu_read_lock(). */ static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n, struct hlist_bl_head *h) { struct hlist_bl_node *first; /* don't need hlist_bl_first_rcu because we're under lock */ first = hlist_bl_first(h); n->next = first; if (first) first->pprev = &n->next; n->pprev = &h->first; /* need _rcu because we can have concurrent lock free readers */ hlist_bl_set_first_rcu(h, n); } /** * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type * @tpos: the type * to use as a loop cursor. * @pos: the &struct hlist_bl_node to use as a loop cursor. * @head: the head for your list. * @member: the name of the hlist_bl_node within the struct.
* */ #define hlist_bl_for_each_entry_rcu(tpos, pos, head, member) \ for (pos = hlist_bl_first_rcu(head); \ pos && \ ({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(pos->next)) #endif
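/*
 * Illustrative usage sketch, not part of the header above: a writer adds
 * entries under the per-bucket bit lock while readers walk the list under
 * rcu_read_lock(), as the kerneldoc comments require. "struct foo" and the
 * two helpers are hypothetical names used only for this example.
 */
struct foo {
	int key;
	struct hlist_bl_node node;
};

static void foo_insert(struct hlist_bl_head *bucket, struct foo *f)
{
	hlist_bl_lock(bucket);			/* serialize against other writers */
	hlist_bl_add_head_rcu(&f->node, bucket);
	hlist_bl_unlock(bucket);
}

static struct foo *foo_lookup(struct hlist_bl_head *bucket, int key)
{
	struct hlist_bl_node *pos;
	struct foo *f, *found = NULL;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(f, pos, bucket, node) {
		if (f->key == key) {
			found = f;
			break;
		}
	}
	/*
	 * In real code the entry must be pinned (e.g. refcounted) before
	 * rcu_read_unlock() if it is used afterwards.
	 */
	rcu_read_unlock();

	return found;
}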
/* * Copyright (c) 2014 Samsung Electronics Co., Ltd * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial
portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <linux/debugfs.h> #include <linux/err.h> #include <linux/export.h> #include <linux/media-bus-format.h> #include <linux/module.h> #include <linux/mutex.h> #include <drm/drm_atomic_state_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_debugfs.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> #include <drm/drm_file.h> #include <drm/drm_of.h> #include <drm/drm_print.h> #include "drm_crtc_internal.h" /** * DOC: overview * * &struct drm_bridge represents a device that hangs on to an encoder. These are * handy when a regular &drm_encoder entity isn't enough to represent the entire * encoder chain. * * A bridge is always attached to a single &drm_encoder at a time, but can be * either connected to it directly, or through a chain of bridges:: * * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B * * Here, the output of the encoder feeds to bridge A, and that furthers feeds to * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear: * Chaining multiple bridges to the output of a bridge, or the same bridge to * the output of different bridges, is not supported. * * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes, * CRTCs, encoders or connectors and hence are not visible to userspace. They * just provide additional hooks to get the desired output at the end of the * encoder chain. */ /** * DOC: display driver integration * * Display drivers are responsible for linking encoders with the first bridge * in the chains. This is done by acquiring the appropriate bridge with * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the * encoder with a call to drm_bridge_attach(). * * Bridges are responsible for linking themselves with the next bridge in the * chain, if any. This is done the same way as for encoders, with the call to * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation. * * Once these links are created, the bridges can participate along with encoder * functions to perform mode validation and fixup (through * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode * setting (through drm_bridge_chain_mode_set()), enable (through * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable()) * and disable (through drm_atomic_bridge_chain_disable() and * drm_atomic_bridge_chain_post_disable()). Those functions call the * corresponding operations provided in &drm_bridge_funcs in sequence for all * bridges in the chain. * * For display drivers that use the atomic helpers * drm_atomic_helper_check_modeset(), * drm_atomic_helper_commit_modeset_enables() and * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled * commit check and commit tail handlers, or through the higher-level * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and * requires no intervention from the driver. 
For other drivers, the relevant * DRM bridge chain functions shall be called manually. * * Bridges also participate in implementing the &drm_connector at the end of * the bridge chain. Display drivers may use the drm_bridge_connector_init() * helper to create the &drm_connector, or implement it manually on top of the * connector-related operations exposed by the bridge (see the overview * documentation of bridge operations for more details). */ /** * DOC: special care dsi * * The interaction between the bridges and other frameworks involved in * the probing of the upstream driver and the bridge driver can be * challenging. Indeed, there's multiple cases that needs to be * considered: * * - The upstream driver doesn't use the component framework and isn't a * MIPI-DSI host. In this case, the bridge driver will probe at some * point and the upstream driver should try to probe again by returning * EPROBE_DEFER as long as the bridge driver hasn't probed. * * - The upstream driver doesn't use the component framework, but is a * MIPI-DSI host. The bridge device uses the MIPI-DCS commands to be * controlled. In this case, the bridge device is a child of the * display device and when it will probe it's assured that the display * device (and MIPI-DSI host) is present. The upstream driver will be * assured that the bridge driver is connected between the * &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations. * Therefore, it must run mipi_dsi_host_register() in its probe * function, and then run drm_bridge_attach() in its * &mipi_dsi_host_ops.attach hook. * * - The upstream driver uses the component framework and is a MIPI-DSI * host. The bridge device uses the MIPI-DCS commands to be * controlled. This is the same situation than above, and can run * mipi_dsi_host_register() in either its probe or bind hooks. * * - The upstream driver uses the component framework and is a MIPI-DSI * host. The bridge device uses a separate bus (such as I2C) to be * controlled. In this case, there's no correlation between the probe * of the bridge and upstream drivers, so care must be taken to avoid * an endless EPROBE_DEFER loop, with each driver waiting for the * other to probe. * * The ideal pattern to cover the last item (and all the others in the * MIPI-DSI host driver case) is to split the operations like this: * * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its * probe hook. It will make sure that the MIPI-DSI host sticks around, * and that the driver's bind can be called. * * - In its probe hook, the bridge driver must try to find its MIPI-DSI * host, register as a MIPI-DSI device and attach the MIPI-DSI device * to its host. The bridge driver is now functional. * * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can * now add its component. Its bind hook will now be called and since * the bridge driver is attached and registered, we can now look for * and attach it. * * At this point, we're now certain that both the upstream driver and * the bridge driver are functional and we can't have a deadlock-like * situation when probing. */ /** * DOC: dsi bridge operations * * DSI host interfaces are expected to be implemented as bridges rather than * encoders, however there are a few aspects of their operation that need to * be defined in order to provide a consistent interface. * * A DSI host should keep the PHY powered down until the pre_enable operation is * called. 
All lanes are in an undefined idle state up to this point, and it * must not be assumed that it is LP-11. * pre_enable should initialise the PHY, set the data lanes to LP-11, and the * clock lane to either LP-11 or HS depending on the mode_flag * %MIPI_DSI_CLOCK_NON_CONTINUOUS. * * Ordinarily the downstream bridge DSI peripheral pre_enable will have been * called before the DSI host. If the DSI peripheral requires LP-11 and/or * the clock lane to be in HS mode prior to pre_enable, then it can set the * &pre_enable_prev_first flag to request the pre_enable (and * post_disable) order to be altered to enable the DSI host first. * * Either the CRTC being enabled, or the DSI host enable operation should switch * the host to actively transmitting video on the data lanes. * * The reverse also applies. The DSI host disable operation or stopping the CRTC * should stop transmitting video, and the data lanes should return to the LP-11 * state. The DSI host &post_disable operation should disable the PHY. * If the &pre_enable_prev_first flag is set, then the DSI peripheral's * bridge &post_disable will be called before the DSI host's post_disable. * * Whilst it is valid to call &host_transfer prior to pre_enable or after * post_disable, the exact state of the lanes is undefined at this point. The * DSI host should initialise the interface, transmit the data, and then disable * the interface again. * * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If * implemented, it therefore needs to be handled entirely within the DSI Host * driver. */ static DEFINE_MUTEX(bridge_lock); static LIST_HEAD(bridge_list); static void __drm_bridge_free(struct kref *kref) { struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount); if (bridge->funcs->destroy) bridge->funcs->destroy(bridge); kfree(bridge->container); } /** * drm_bridge_get - Acquire a bridge reference * @bridge: DRM bridge * * This function increments the bridge's refcount. * * Returns: * Pointer to @bridge. */ struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge) { if (bridge) kref_get(&bridge->refcount); return bridge; } EXPORT_SYMBOL(drm_bridge_get); /** * drm_bridge_put - Release a bridge reference * @bridge: DRM bridge * * This function decrements the bridge's reference count and frees the * object if the reference count drops to zero. */ void drm_bridge_put(struct drm_bridge *bridge) { if (bridge) kref_put(&bridge->refcount, __drm_bridge_free); } EXPORT_SYMBOL(drm_bridge_put); /** * drm_bridge_put_void - wrapper to drm_bridge_put() taking a void pointer * * @data: pointer to @struct drm_bridge, cast to a void pointer * * Wrapper of drm_bridge_put() to be used when a function taking a void * pointer is needed, for example as a devm action. 
*/ static void drm_bridge_put_void(void *data) { struct drm_bridge *bridge = (struct drm_bridge *)data; drm_bridge_put(bridge); } void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset, const struct drm_bridge_funcs *funcs) { void *container; struct drm_bridge *bridge; int err; if (!funcs) { dev_warn(dev, "Missing funcs pointer\n"); return ERR_PTR(-EINVAL); } container = kzalloc(size, GFP_KERNEL); if (!container) return ERR_PTR(-ENOMEM); bridge = container + offset; bridge->container = container; bridge->funcs = funcs; kref_init(&bridge->refcount); err = devm_add_action_or_reset(dev, drm_bridge_put_void, bridge); if (err) return ERR_PTR(err); return container; } EXPORT_SYMBOL(__devm_drm_bridge_alloc); /** * drm_bridge_add - add the given bridge to the global bridge list * * @bridge: bridge control structure * * The bridge to be added must have been allocated by * devm_drm_bridge_alloc(). */ void drm_bridge_add(struct drm_bridge *bridge) { if (!bridge->container) DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n"); drm_bridge_get(bridge); mutex_init(&bridge->hpd_mutex); if (bridge->ops & DRM_BRIDGE_OP_HDMI) bridge->ycbcr_420_allowed = !!(bridge->supported_formats & BIT(HDMI_COLORSPACE_YUV420)); mutex_lock(&bridge_lock); list_add_tail(&bridge->list, &bridge_list); mutex_unlock(&bridge_lock); } EXPORT_SYMBOL(drm_bridge_add); static void drm_bridge_remove_void(void *bridge) { drm_bridge_remove(bridge); } /** * devm_drm_bridge_add - devm managed version of drm_bridge_add() * * @dev: device to tie the bridge lifetime to * @bridge: bridge control structure * * This is the managed version of drm_bridge_add() which automatically * calls drm_bridge_remove() when @dev is unbound. * * Return: 0 if no error or negative error code. */ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge) { drm_bridge_add(bridge); return devm_add_action_or_reset(dev, drm_bridge_remove_void, bridge); } EXPORT_SYMBOL(devm_drm_bridge_add); /** * drm_bridge_remove - remove the given bridge from the global bridge list * * @bridge: bridge control structure */ void drm_bridge_remove(struct drm_bridge *bridge) { mutex_lock(&bridge_lock); list_del_init(&bridge->list); mutex_unlock(&bridge_lock); mutex_destroy(&bridge->hpd_mutex); drm_bridge_put(bridge); } EXPORT_SYMBOL(drm_bridge_remove); static struct drm_private_state * drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj) { struct drm_bridge *bridge = drm_priv_to_bridge(obj); struct drm_bridge_state *state; state = bridge->funcs->atomic_duplicate_state(bridge); return state ? &state->base : NULL; } static void drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj, struct drm_private_state *s) { struct drm_bridge_state *state = drm_priv_to_bridge_state(s); struct drm_bridge *bridge = drm_priv_to_bridge(obj); bridge->funcs->atomic_destroy_state(bridge, state); } static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = { .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state, .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state, }; static bool drm_bridge_is_atomic(struct drm_bridge *bridge) { return bridge->funcs->atomic_reset != NULL; } /** * drm_bridge_attach - attach the bridge to an encoder's chain * * @encoder: DRM encoder * @bridge: bridge to attach * @previous: previous bridge in the chain (optional) * @flags: DRM_BRIDGE_ATTACH_* flags * * Called by a kms driver to link the bridge to an encoder's chain. 
The previous * argument specifies the previous bridge in the chain. If NULL, the bridge is * linked directly at the encoder's output. Otherwise it is linked at the * previous bridge's output. * * If non-NULL the previous bridge must be already attached by a call to this * function. * * Note that bridges attached to encoders are auto-detached during encoder * cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally * *not* be balanced with a drm_bridge_detach() in driver code. * * RETURNS: * Zero on success, error code on failure */ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, struct drm_bridge *previous, enum drm_bridge_attach_flags flags) { int ret; if (!encoder || !bridge) return -EINVAL; drm_bridge_get(bridge); if (previous && (!previous->dev || previous->encoder != encoder)) { ret = -EINVAL; goto err_put_bridge; } if (bridge->dev) { ret = -EBUSY; goto err_put_bridge; } bridge->dev = encoder->dev; bridge->encoder = encoder; if (previous) list_add(&bridge->chain_node, &previous->chain_node); else list_add(&bridge->chain_node, &encoder->bridge_chain); if (bridge->funcs->attach) { ret = bridge->funcs->attach(bridge, encoder, flags); if (ret < 0) goto err_reset_bridge; } if (drm_bridge_is_atomic(bridge)) { struct drm_bridge_state *state; state = bridge->funcs->atomic_reset(bridge); if (IS_ERR(state)) { ret = PTR_ERR(state); goto err_detach_bridge; } drm_atomic_private_obj_init(bridge->dev, &bridge->base, &state->base, &drm_bridge_priv_state_funcs); } return 0; err_detach_bridge: if (bridge->funcs->detach) bridge->funcs->detach(bridge); err_reset_bridge: bridge->dev = NULL; bridge->encoder = NULL; list_del(&bridge->chain_node); if (ret != -EPROBE_DEFER) DRM_ERROR("failed to attach bridge %pOF to encoder %s: %d\n", bridge->of_node, encoder->name, ret); else dev_err_probe(encoder->dev->dev, -EPROBE_DEFER, "failed to attach bridge %pOF to encoder %s\n", bridge->of_node, encoder->name); err_put_bridge: drm_bridge_put(bridge); return ret; } EXPORT_SYMBOL(drm_bridge_attach); void drm_bridge_detach(struct drm_bridge *bridge) { if (WARN_ON(!bridge)) return; if (WARN_ON(!bridge->dev)) return; if (drm_bridge_is_atomic(bridge)) drm_atomic_private_obj_fini(&bridge->base); if (bridge->funcs->detach) bridge->funcs->detach(bridge); list_del(&bridge->chain_node); bridge->dev = NULL; drm_bridge_put(bridge); } /** * DOC: bridge operations * * Bridge drivers expose operations through the &drm_bridge_funcs structure. * The DRM internals (atomic and CRTC helpers) use the helpers defined in * drm_bridge.c to call bridge operations. Those operations are divided in * three big categories to support different parts of the bridge usage. * * - The encoder-related operations support control of the bridges in the * chain, and are roughly counterparts to the &drm_encoder_helper_funcs * operations. They are used by the legacy CRTC and the atomic modeset * helpers to perform mode validation, fixup and setting, and enable and * disable the bridge automatically. * * The enable and disable operations are split in * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable, * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide * finer-grained control. 
* * Bridge drivers may implement the legacy version of those operations, or * the atomic version (prefixed with atomic\_), in which case they shall also * implement the atomic state bookkeeping operations * (&drm_bridge_funcs.atomic_duplicate_state, * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.reset). * Mixing atomic and non-atomic versions of the operations is not supported. * * - The bus format negotiation operations * &drm_bridge_funcs.atomic_get_output_bus_fmts and * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to * negotiate the formats transmitted between bridges in the chain when * multiple formats are supported. Negotiation for formats is performed * transparently for display drivers by the atomic modeset helpers. Only * atomic versions of those operations exist, bridge drivers that need to * implement them shall thus also implement the atomic version of the * encoder-related operations. This feature is not supported by the legacy * CRTC helpers. * * - The connector-related operations support implementing a &drm_connector * based on a chain of bridges. DRM bridges traditionally create a * &drm_connector for bridges meant to be used at the end of the chain. This * puts additional burden on bridge drivers, especially for bridges that may * be used in the middle of a chain or at the end of it. Furthermore, it * requires all operations of the &drm_connector to be handled by a single * bridge, which doesn't always match the hardware architecture. * * To simplify bridge drivers and make the connector implementation more * flexible, a new model allows bridges to unconditionally skip creation of * &drm_connector and instead expose &drm_bridge_funcs operations to support * an externally-implemented &drm_connector. Those operations are * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes, * &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify, * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When * implemented, display drivers shall create a &drm_connector instance for * each chain of bridges, and implement those connector instances based on * the bridge connector operations. * * Bridge drivers shall implement the connector-related operations for all * the features that the bridge hardware support. For instance, if a bridge * supports reading EDID, the &drm_bridge_funcs.get_edid shall be * implemented. This however doesn't mean that the DDC lines are wired to the * bridge on a particular platform, as they could also be connected to an I2C * controller of the SoC. Support for the connector-related operations on the * running platform is reported through the &drm_bridge.ops flags. Bridge * drivers shall detect which operations they can support on the platform * (usually this information is provided by ACPI or DT), and set the * &drm_bridge.ops flags for all supported operations. A flag shall only be * set if the corresponding &drm_bridge_funcs operation is implemented, but * an implemented operation doesn't necessarily imply that the corresponding * flag will be set. Display drivers shall use the &drm_bridge.ops flags to * decide which bridge to delegate a connector operation to. This mechanism * allows providing a single static const &drm_bridge_funcs instance in * bridge drivers, improving security by storing function pointers in * read-only memory. 
* * In order to ease transition, bridge drivers may support both the old and * new models by making connector creation optional and implementing the * connected-related bridge operations. Connector creation is then controlled * by the flags argument to the drm_bridge_attach() function. Display drivers * that support the new model and create connectors themselves shall set the * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip * connector creation. For intermediate bridges in the chain, the flag shall * be passed to the drm_bridge_attach() call for the downstream bridge. * Bridge drivers that implement the new model only shall return an error * from their &drm_bridge_funcs.attach handler when the * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers * should use the new model, and convert the bridge drivers they use if * needed, in order to gradually transition to the new model. */ /** * drm_bridge_chain_mode_valid - validate the mode against all bridges in the * encoder chain. * @bridge: bridge control structure * @info: display info against which the mode shall be validated * @mode: desired mode to be validated * * Calls &drm_bridge_funcs.mode_valid for all the bridges in the encoder * chain, starting from the first bridge to the last. If at least one bridge * does not accept the mode the function returns the error code. * * Note: the bridge passed should be the one closest to the encoder. * * RETURNS: * MODE_OK on success, drm_mode_status Enum error code on failure */ enum drm_mode_status drm_bridge_chain_mode_valid(struct drm_bridge *bridge, const struct drm_display_info *info, const struct drm_display_mode *mode) { struct drm_encoder *encoder; if (!bridge) return MODE_OK; encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { enum drm_mode_status ret; if (!bridge->funcs->mode_valid) continue; ret = bridge->funcs->mode_valid(bridge, info, mode); if (ret != MODE_OK) return ret; } return MODE_OK; } EXPORT_SYMBOL(drm_bridge_chain_mode_valid); /** * drm_bridge_chain_mode_set - set proposed mode for all bridges in the * encoder chain * @bridge: bridge control structure * @mode: desired mode to be set for the encoder chain * @adjusted_mode: updated mode that works for this encoder chain * * Calls &drm_bridge_funcs.mode_set op for all the bridges in the * encoder chain, starting from the first bridge to the last. * * Note: the bridge passed should be the one closest to the encoder */ void drm_bridge_chain_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { struct drm_encoder *encoder; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { if (bridge->funcs->mode_set) bridge->funcs->mode_set(bridge, mode, adjusted_mode); } } EXPORT_SYMBOL(drm_bridge_chain_mode_set); /** * drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain * @bridge: bridge control structure * @state: atomic state being committed * * Calls &drm_bridge_funcs.atomic_disable (falls back on * &drm_bridge_funcs.disable) op for all the bridges in the encoder chain, * starting from the last bridge to the first. 
These are called before calling * &drm_encoder_helper_funcs.atomic_disable * * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_encoder *encoder; struct drm_bridge *iter; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { if (iter->funcs->atomic_disable) { iter->funcs->atomic_disable(iter, state); } else if (iter->funcs->disable) { iter->funcs->disable(iter); } if (iter == bridge) break; } } EXPORT_SYMBOL(drm_atomic_bridge_chain_disable); static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) { if (state && bridge->funcs->atomic_post_disable) bridge->funcs->atomic_post_disable(bridge, state); else if (bridge->funcs->post_disable) bridge->funcs->post_disable(bridge); } /** * drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges * in the encoder chain * @bridge: bridge control structure * @state: atomic state being committed * * Calls &drm_bridge_funcs.atomic_post_disable (falls back on * &drm_bridge_funcs.post_disable) op for all the bridges in the encoder chain, * starting from the first bridge to the last. These are called after completing * &drm_encoder_helper_funcs.atomic_disable * * If a bridge sets @pre_enable_prev_first, then the @post_disable for that * bridge will be called before the previous one to reverse the @pre_enable * calling direction. * * Example: * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E * * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting * @post_disable order would be, * Bridge B, Bridge A, Bridge E, Bridge D, Bridge C. 
* * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_encoder *encoder; struct drm_bridge *next, *limit; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { limit = NULL; if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) { next = list_next_entry(bridge, chain_node); if (next->pre_enable_prev_first) { /* next bridge had requested that prev * was enabled first, so disabled last */ limit = next; /* Find the next bridge that has NOT requested * prev to be enabled first / disabled last */ list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) { if (!next->pre_enable_prev_first) { next = list_prev_entry(next, chain_node); limit = next; break; } if (list_is_last(&next->chain_node, &encoder->bridge_chain)) { limit = next; break; } } /* Call these bridges in reverse order */ list_for_each_entry_from_reverse(next, &encoder->bridge_chain, chain_node) { if (next == bridge) break; drm_atomic_bridge_call_post_disable(next, state); } } } drm_atomic_bridge_call_post_disable(bridge, state); if (limit) /* Jump all bridges that we have already post_disabled */ bridge = limit; } } EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable); static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { if (state && bridge->funcs->atomic_pre_enable) bridge->funcs->atomic_pre_enable(bridge, state); else if (bridge->funcs->pre_enable) bridge->funcs->pre_enable(bridge); } /** * drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in * the encoder chain * @bridge: bridge control structure * @state: atomic state being committed * * Calls &drm_bridge_funcs.atomic_pre_enable (falls back on * &drm_bridge_funcs.pre_enable) op for all the bridges in the encoder chain, * starting from the last bridge to the first. These are called before calling * &drm_encoder_helper_funcs.atomic_enable * * If a bridge sets @pre_enable_prev_first, then the pre_enable for the * prev bridge will be called before pre_enable of this bridge. * * Example: * Bridge A ---> Bridge B ---> Bridge C ---> Bridge D ---> Bridge E * * With pre_enable_prev_first flag enable in Bridge B, D, E then the resulting * @pre_enable order would be, * Bridge C, Bridge D, Bridge E, Bridge A, Bridge B. * * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_encoder *encoder; struct drm_bridge *iter, *next, *limit; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { if (iter->pre_enable_prev_first) { next = iter; limit = bridge; list_for_each_entry_from_reverse(next, &encoder->bridge_chain, chain_node) { if (next == bridge) break; if (!next->pre_enable_prev_first) { /* Found first bridge that does NOT * request prev to be enabled first */ limit = next; break; } } list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) { /* Call requested prev bridge pre_enable * in order. */ if (next == iter) /* At the first bridge to request prev * bridges called first. 
*/ break; drm_atomic_bridge_call_pre_enable(next, state); } } drm_atomic_bridge_call_pre_enable(iter, state); if (iter->pre_enable_prev_first) /* Jump all bridges that we have already pre_enabled */ iter = limit; if (iter == bridge) break; } } EXPORT_SYMBOL(drm_atomic_bridge_chain_pre_enable); /** * drm_atomic_bridge_chain_enable - enables all bridges in the encoder chain * @bridge: bridge control structure * @state: atomic state being committed * * Calls &drm_bridge_funcs.atomic_enable (falls back on * &drm_bridge_funcs.enable) op for all the bridges in the encoder chain, * starting from the first bridge to the last. These are called after completing * &drm_encoder_helper_funcs.atomic_enable * * Note: the bridge passed should be the one closest to the encoder */ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { struct drm_encoder *encoder; if (!bridge) return; encoder = bridge->encoder; list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) { if (bridge->funcs->atomic_enable) { bridge->funcs->atomic_enable(bridge, state); } else if (bridge->funcs->enable) { bridge->funcs->enable(bridge); } } } EXPORT_SYMBOL(drm_atomic_bridge_chain_enable); static int drm_atomic_bridge_check(struct drm_bridge *bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { if (bridge->funcs->atomic_check) { struct drm_bridge_state *bridge_state; int ret; bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, bridge); if (WARN_ON(!bridge_state)) return -EINVAL; ret = bridge->funcs->atomic_check(bridge, bridge_state, crtc_state, conn_state); if (ret) return ret; } else if (bridge->funcs->mode_fixup) { if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode, &crtc_state->adjusted_mode)) return -EINVAL; } return 0; } static int select_bus_fmt_recursive(struct drm_bridge *first_bridge, struct drm_bridge *cur_bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state, u32 out_bus_fmt) { unsigned int i, num_in_bus_fmts = 0; struct drm_bridge_state *cur_state; struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(cur_bridge); u32 *in_bus_fmts; int ret; cur_state = drm_atomic_get_new_bridge_state(crtc_state->state, cur_bridge); /* * If bus format negotiation is not supported by this bridge, let's * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and * hope that it can handle this situation gracefully (by providing * appropriate default values). */ if (!cur_bridge->funcs->atomic_get_input_bus_fmts) { if (cur_bridge != first_bridge) { ret = select_bus_fmt_recursive(first_bridge, prev_bridge, crtc_state, conn_state, MEDIA_BUS_FMT_FIXED); if (ret) return ret; } /* * Driver does not implement the atomic state hooks, but that's * fine, as long as it does not access the bridge state. */ if (cur_state) { cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED; cur_state->output_bus_cfg.format = out_bus_fmt; } return 0; } /* * If the driver implements ->atomic_get_input_bus_fmts() it * should also implement the atomic state hooks. 
*/ if (WARN_ON(!cur_state)) return -EINVAL; in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge, cur_state, crtc_state, conn_state, out_bus_fmt, &num_in_bus_fmts); if (!num_in_bus_fmts) return -ENOTSUPP; else if (!in_bus_fmts) return -ENOMEM; if (first_bridge == cur_bridge) { cur_state->input_bus_cfg.format = in_bus_fmts[0]; cur_state->output_bus_cfg.format = out_bus_fmt; kfree(in_bus_fmts); return 0; } for (i = 0; i < num_in_bus_fmts; i++) { ret = select_bus_fmt_recursive(first_bridge, prev_bridge, crtc_state, conn_state, in_bus_fmts[i]); if (ret != -ENOTSUPP) break; } if (!ret) { cur_state->input_bus_cfg.format = in_bus_fmts[i]; cur_state->output_bus_cfg.format = out_bus_fmt; } kfree(in_bus_fmts); return ret; } /* * This function is called by &drm_atomic_bridge_chain_check() just before * calling &drm_bridge_funcs.atomic_check() on all elements of the chain. * It performs bus format negotiation between bridge elements. The negotiation * happens in reverse order, starting from the last element in the chain up to * @bridge. * * Negotiation starts by retrieving supported output bus formats on the last * bridge element and testing them one by one. The test is recursive, meaning * that for each tested output format, the whole chain will be walked backward, * and each element will have to choose an input bus format that can be * transcoded to the requested output format. When a bridge element does not * support transcoding into a specific output format -ENOTSUPP is returned and * the next bridge element will have to try a different format. If none of the * combinations worked, -ENOTSUPP is returned and the atomic modeset will fail. * * This implementation is relying on * &drm_bridge_funcs.atomic_get_output_bus_fmts() and * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported * input/output formats. * * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts() * tries a single format: &drm_connector.display_info.bus_formats[0] if * available, MEDIA_BUS_FMT_FIXED otherwise. * * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented, * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the * bridge element that lacks this hook and asks the previous element in the * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what * to do in that case (fail if they want to enforce bus format negotiation, or * provide a reasonable default if they need to support pipelines where not * all elements support bus format negotiation). */ static int drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_connector *conn = conn_state->connector; struct drm_encoder *encoder = bridge->encoder; struct drm_bridge_state *last_bridge_state; unsigned int i, num_out_bus_fmts = 0; struct drm_bridge *last_bridge; u32 *out_bus_fmts; int ret = 0; last_bridge = list_last_entry(&encoder->bridge_chain, struct drm_bridge, chain_node); last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state, last_bridge); if (last_bridge->funcs->atomic_get_output_bus_fmts) { const struct drm_bridge_funcs *funcs = last_bridge->funcs; /* * If the driver implements ->atomic_get_output_bus_fmts() it * should also implement the atomic state hooks. 
*/ if (WARN_ON(!last_bridge_state)) return -EINVAL; out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge, last_bridge_state, crtc_state, conn_state, &num_out_bus_fmts); if (!num_out_bus_fmts) return -ENOTSUPP; else if (!out_bus_fmts) return -ENOMEM; } else { num_out_bus_fmts = 1; out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL); if (!out_bus_fmts) return -ENOMEM; if (conn->display_info.num_bus_formats && conn->display_info.bus_formats) out_bus_fmts[0] = conn->display_info.bus_formats[0]; else out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED; } for (i = 0; i < num_out_bus_fmts; i++) { ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state, conn_state, out_bus_fmts[i]); if (ret != -ENOTSUPP) break; } kfree(out_bus_fmts); return ret; } static void drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge, struct drm_connector *conn, struct drm_atomic_state *state) { struct drm_bridge_state *bridge_state, *next_bridge_state; struct drm_bridge *next_bridge; u32 output_flags = 0; bridge_state = drm_atomic_get_new_bridge_state(state, bridge); /* No bridge state attached to this bridge => nothing to propagate. */ if (!bridge_state) return; next_bridge = drm_bridge_get_next_bridge(bridge); /* * Let's try to apply the most common case here, that is, propagate * display_info flags for the last bridge, and propagate the input * flags of the next bridge element to the output end of the current * bridge when the bridge is not the last one. * There are exceptions to this rule, like when signal inversion is * happening at the board level, but that's something drivers can deal * with from their &drm_bridge_funcs.atomic_check() implementation by * simply overriding the flags value we've set here. */ if (!next_bridge) { output_flags = conn->display_info.bus_flags; } else { next_bridge_state = drm_atomic_get_new_bridge_state(state, next_bridge); /* * No bridge state attached to the next bridge, just leave the * flags to 0. */ if (next_bridge_state) output_flags = next_bridge_state->input_bus_cfg.flags; } bridge_state->output_bus_cfg.flags = output_flags; /* * Propagate the output flags to the input end of the bridge. Again, it's * not necessarily what all bridges want, but that's what most of them * do, and by doing that by default we avoid forcing drivers to * duplicate the "dummy propagation" logic. */ bridge_state->input_bus_cfg.flags = output_flags; } /** * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain * @bridge: bridge control structure * @crtc_state: new CRTC state * @conn_state: new connector state * * First trigger a bus format negotiation before calling * &drm_bridge_funcs.atomic_check() (falls back on * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain, * starting from the last bridge to the first. These are called before calling * &drm_encoder_helper_funcs.atomic_check() * * RETURNS: * 0 on success, a negative error code on failure */ int drm_atomic_bridge_chain_check(struct drm_bridge *bridge, struct drm_crtc_state *crtc_state, struct drm_connector_state *conn_state) { struct drm_connector *conn = conn_state->connector; struct drm_encoder *encoder; struct drm_bridge *iter; int ret; if (!bridge) return 0; ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state, conn_state); if (ret) return ret; encoder = bridge->encoder; list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) { int ret; /* * Bus flags are propagated by default. 
If a bridge needs to * tweak the input bus flags for any reason, it should happen * in its &drm_bridge_funcs.atomic_check() implementation such * that preceding bridges in the chain can propagate the new * bus flags. */ drm_atomic_bridge_propagate_bus_flags(iter, conn, crtc_state->state); ret = drm_atomic_bridge_check(iter, crtc_state, conn_state); if (ret) return ret; if (iter == bridge) break; } return 0; } EXPORT_SYMBOL(drm_atomic_bridge_chain_check); /** * drm_bridge_detect - check if anything is attached to the bridge output * @bridge: bridge control structure * @connector: attached connector * * If the bridge supports output detection, as reported by the * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the * bridge and return the connection status. Otherwise return * connector_status_unknown. * * RETURNS: * The detection status on success, or connector_status_unknown if the bridge * doesn't support output detection. */ enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { if (!(bridge->ops & DRM_BRIDGE_OP_DETECT)) return connector_status_unknown; return bridge->funcs->detect(bridge, connector); } EXPORT_SYMBOL_GPL(drm_bridge_detect); /** * drm_bridge_get_modes - fill all modes currently valid for the sink into the * @connector * @bridge: bridge control structure * @connector: the connector to fill with modes * * If the bridge supports output modes retrieval, as reported by the * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to * fill the connector with all valid modes and return the number of modes * added. Otherwise return 0. * * RETURNS: * The number of modes added to the connector. */ int drm_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) { if (!(bridge->ops & DRM_BRIDGE_OP_MODES)) return 0; return bridge->funcs->get_modes(bridge, connector); } EXPORT_SYMBOL_GPL(drm_bridge_get_modes); /** * drm_bridge_edid_read - read the EDID data of the connected display * @bridge: bridge control structure * @connector: the connector to read EDID for * * If the bridge supports output EDID retrieval, as reported by the * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.edid_read to get * the EDID and return it. Otherwise return NULL. * * RETURNS: * The retrieved EDID on success, or NULL otherwise. */ const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { if (!(bridge->ops & DRM_BRIDGE_OP_EDID)) return NULL; return bridge->funcs->edid_read(bridge, connector); } EXPORT_SYMBOL_GPL(drm_bridge_edid_read); /** * drm_bridge_hpd_enable - enable hot plug detection for the bridge * @bridge: bridge control structure * @cb: hot-plug detection callback * @data: data to be passed to the hot-plug detection callback * * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb * and @data as hot plug notification callback. From now on the @cb will be * called with @data when an output status change is detected by the bridge, * until hot plug notification gets disabled with drm_bridge_hpd_disable(). * * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in * bridge->ops. This function shall not be called when the flag is not set. * * Only one hot plug detection callback can be registered at a time, it is an * error to call this function when hot plug detection is already enabled for * the bridge. 
*/ void drm_bridge_hpd_enable(struct drm_bridge *bridge, void (*cb)(void *data, enum drm_connector_status status), void *data) { if (!(bridge->ops & DRM_BRIDGE_OP_HPD)) return; mutex_lock(&bridge->hpd_mutex); if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n")) goto unlock; bridge->hpd_cb = cb; bridge->hpd_data = data; if (bridge->funcs->hpd_enable) bridge->funcs->hpd_enable(bridge); unlock: mutex_unlock(&bridge->hpd_mutex); } EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable); /** * drm_bridge_hpd_disable - disable hot plug detection for the bridge * @bridge: bridge control structure * * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot * plug detection callback previously registered with drm_bridge_hpd_enable(). * Once this function returns the callback will not be called by the bridge * when an output status change occurs. * * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in * bridge->ops. This function shall not be called when the flag is not set. */ void drm_bridge_hpd_disable(struct drm_bridge *bridge) { if (!(bridge->ops & DRM_BRIDGE_OP_HPD)) return; mutex_lock(&bridge->hpd_mutex); if (bridge->funcs->hpd_disable) bridge->funcs->hpd_disable(bridge); bridge->hpd_cb = NULL; bridge->hpd_data = NULL; mutex_unlock(&bridge->hpd_mutex); } EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable); /** * drm_bridge_hpd_notify - notify hot plug detection events * @bridge: bridge control structure * @status: output connection status * * Bridge drivers shall call this function to report hot plug events when they * detect a change in the output status, when hot plug detection has been * enabled by drm_bridge_hpd_enable(). * * This function shall be called in a context that can sleep. */ void drm_bridge_hpd_notify(struct drm_bridge *bridge, enum drm_connector_status status) { mutex_lock(&bridge->hpd_mutex); if (bridge->hpd_cb) bridge->hpd_cb(bridge->hpd_data, status); mutex_unlock(&bridge->hpd_mutex); } EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify); #ifdef CONFIG_OF /** * of_drm_find_bridge - find the bridge corresponding to the device node in * the global bridge list * * @np: device node * * RETURNS: * drm_bridge control struct on success, NULL on failure */ struct drm_bridge *of_drm_find_bridge(struct device_node *np) { struct drm_bridge *bridge; mutex_lock(&bridge_lock); list_for_each_entry(bridge, &bridge_list, list) { if (bridge->of_node == np) { mutex_unlock(&bridge_lock); return bridge; } } mutex_unlock(&bridge_lock); return NULL; } EXPORT_SYMBOL(of_drm_find_bridge); #endif /** * devm_drm_put_bridge - Release a bridge reference obtained via devm * @dev: device that got the bridge via devm * @bridge: pointer to a struct drm_bridge obtained via devm * * Same as drm_bridge_put() for bridge pointers obtained via devm functions * such as devm_drm_bridge_alloc(). * * This function is a temporary workaround and MUST NOT be used. Manual * handling of bridge lifetime is inherently unsafe. 
*/ void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge) { devm_release_action(dev, drm_bridge_put_void, bridge); } EXPORT_SYMBOL(devm_drm_put_bridge); static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, struct drm_bridge *bridge, unsigned int idx) { drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); drm_printf(p, "\ttype: [%d] %s\n", bridge->type, drm_get_connector_type_name(bridge->type)); if (bridge->of_node) drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node); drm_printf(p, "\tops: [0x%x]", bridge->ops); if (bridge->ops & DRM_BRIDGE_OP_DETECT) drm_puts(p, " detect"); if (bridge->ops & DRM_BRIDGE_OP_EDID) drm_puts(p, " edid"); if (bridge->ops & DRM_BRIDGE_OP_HPD) drm_puts(p, " hpd"); if (bridge->ops & DRM_BRIDGE_OP_MODES) drm_puts(p, " modes"); if (bridge->ops & DRM_BRIDGE_OP_HDMI) drm_puts(p, " hdmi"); drm_puts(p, "\n"); } static int allbridges_show(struct seq_file *m, void *data) { struct drm_printer p = drm_seq_file_printer(m); struct drm_bridge *bridge; unsigned int idx = 0; mutex_lock(&bridge_lock); list_for_each_entry(bridge, &bridge_list, list) drm_bridge_debugfs_show_bridge(&p, bridge, idx++); mutex_unlock(&bridge_lock); return 0; } DEFINE_SHOW_ATTRIBUTE(allbridges); static int encoder_bridges_show(struct seq_file *m, void *data) { struct drm_encoder *encoder = m->private; struct drm_printer p = drm_seq_file_printer(m); struct drm_bridge *bridge; unsigned int idx = 0; drm_for_each_bridge_in_chain(encoder, bridge) drm_bridge_debugfs_show_bridge(&p, bridge, idx++); return 0; } DEFINE_SHOW_ATTRIBUTE(encoder_bridges); void drm_bridge_debugfs_params(struct dentry *root) { debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops); } void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder) { /* bridges list */ debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops); } MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>"); MODULE_DESCRIPTION("DRM bridge infrastructure"); MODULE_LICENSE("GPL and additional rights"); |
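To complement the bridge-side documentation above, here is a hypothetical display-driver-side sketch of the %DRM_BRIDGE_ATTACH_NO_CONNECTOR model. The my_encoder_init_output() name is invented; drm_bridge_connector_init() is assumed to be the helper from drm_bridge_connector.c that builds a &drm_connector on top of the chain's connector operations.

/*
 * Hypothetical example: a display driver attaching a bridge chain without
 * bridge-created connectors, then creating one connector for the chain.
 */
static int my_encoder_init_output(struct drm_device *drm,
                                  struct drm_encoder *encoder,
                                  struct drm_bridge *next_bridge)
{
        struct drm_connector *connector;
        int ret;

        /* No bridge in the chain may create a connector on its own. */
        ret = drm_bridge_attach(encoder, next_bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
        if (ret)
                return ret;

        /* One connector for the whole chain, delegating via &drm_bridge.ops. */
        connector = drm_bridge_connector_init(drm, encoder);
        if (IS_ERR(connector))
                return PTR_ERR(connector);

        return drm_connector_attach_encoder(connector, encoder);
}

A bridge driver that supports only the new model would, conversely, return an error from its &drm_bridge_funcs.attach implementation when the flag is absent, as noted in the comments above.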
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/netfilter.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/percpu.h> #include <linux/netdevice.h> #include <linux/security.h> #include <net/net_namespace.h> #ifdef CONFIG_SYSCTL #include <linux/sysctl.h> #endif #include <net/netfilter/nf_log.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_expect.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_acct.h> #include <net/netfilter/nf_conntrack_zones.h> #include <net/netfilter/nf_conntrack_timestamp.h> #include <linux/rculist_nulls.h> static bool enable_hooks __read_mostly; MODULE_PARM_DESC(enable_hooks, "Always enable conntrack hooks"); module_param(enable_hooks, bool, 0000); unsigned int nf_conntrack_net_id __read_mostly; #ifdef CONFIG_NF_CONNTRACK_PROCFS void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l4proto *l4proto) { switch (tuple->src.l3num) { case NFPROTO_IPV4: seq_printf(s, "src=%pI4 dst=%pI4 ", &tuple->src.u3.ip, &tuple->dst.u3.ip); break; case NFPROTO_IPV6: seq_printf(s, "src=%pI6 dst=%pI6 ", tuple->src.u3.ip6, tuple->dst.u3.ip6); break; default: break; } switch (l4proto->l4proto) { case IPPROTO_ICMP: seq_printf(s, "type=%u code=%u id=%u ", tuple->dst.u.icmp.type, tuple->dst.u.icmp.code, ntohs(tuple->src.u.icmp.id)); break; case IPPROTO_TCP: seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.tcp.port), ntohs(tuple->dst.u.tcp.port)); break; case IPPROTO_UDPLITE: case IPPROTO_UDP: seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.udp.port), ntohs(tuple->dst.u.udp.port)); break; case IPPROTO_SCTP: seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.sctp.port), ntohs(tuple->dst.u.sctp.port)); break; case IPPROTO_ICMPV6: seq_printf(s, "type=%u code=%u id=%u ", tuple->dst.u.icmp.type, tuple->dst.u.icmp.code, ntohs(tuple->src.u.icmp.id)); break; case IPPROTO_GRE: seq_printf(s, "srckey=0x%x dstkey=0x%x ", ntohs(tuple->src.u.gre.key), ntohs(tuple->dst.u.gre.key));
break; default: break; } } EXPORT_SYMBOL_GPL(print_tuple); struct ct_iter_state { struct seq_net_private p; struct hlist_nulls_head *hash; unsigned int htable_size; unsigned int skip_elems; unsigned int bucket; u_int64_t time_now; }; static struct nf_conntrack_tuple_hash *ct_get_next(const struct net *net, struct ct_iter_state *st) { struct nf_conntrack_tuple_hash *h; struct hlist_nulls_node *n; unsigned int i; for (i = st->bucket; i < st->htable_size; i++) { unsigned int skip = 0; restart: hlist_nulls_for_each_entry_rcu(h, n, &st->hash[i], hnnode) { struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); struct hlist_nulls_node *tmp = n; if (!net_eq(net, nf_ct_net(ct))) continue; if (++skip <= st->skip_elems) continue; /* h should be returned, skip to nulls marker. */ while (!is_a_nulls(tmp)) tmp = rcu_dereference(hlist_nulls_next_rcu(tmp)); /* check if h is still linked to hash[i] */ if (get_nulls_value(tmp) != i) { skip = 0; goto restart; } st->skip_elems = skip; st->bucket = i; return h; } skip = 0; if (get_nulls_value(n) != i) goto restart; st->skip_elems = 0; } st->bucket = i; return NULL; } static void *ct_seq_start(struct seq_file *seq, loff_t *pos) __acquires(RCU) { struct ct_iter_state *st = seq->private; struct net *net = seq_file_net(seq); st->time_now = ktime_get_real_ns(); rcu_read_lock(); nf_conntrack_get_ht(&st->hash, &st->htable_size); if (*pos == 0) { st->skip_elems = 0; st->bucket = 0; } else if (st->skip_elems) { /* resume from last dumped entry */ st->skip_elems--; } return ct_get_next(net, st); } static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos) { struct ct_iter_state *st = s->private; struct net *net = seq_file_net(s); (*pos)++; return ct_get_next(net, st); } static void ct_seq_stop(struct seq_file *s, void *v) __releases(RCU) { rcu_read_unlock(); } #ifdef CONFIG_NF_CONNTRACK_SECMARK static void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) { struct lsm_context ctx; int ret; ret = security_secid_to_secctx(ct->secmark, &ctx); if (ret < 0) return; seq_printf(s, "secctx=%s ", ctx.context); security_release_secctx(&ctx); } #else static inline void ct_show_secctx(struct seq_file *s, const struct nf_conn *ct) { } #endif #ifdef CONFIG_NF_CONNTRACK_ZONES static void ct_show_zone(struct seq_file *s, const struct nf_conn *ct, int dir) { const struct nf_conntrack_zone *zone = nf_ct_zone(ct); if (zone->dir != dir) return; switch (zone->dir) { case NF_CT_DEFAULT_ZONE_DIR: seq_printf(s, "zone=%u ", zone->id); break; case NF_CT_ZONE_DIR_ORIG: seq_printf(s, "zone-orig=%u ", zone->id); break; case NF_CT_ZONE_DIR_REPL: seq_printf(s, "zone-reply=%u ", zone->id); break; default: break; } } #else static inline void ct_show_zone(struct seq_file *s, const struct nf_conn *ct, int dir) { } #endif #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP static void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) { struct ct_iter_state *st = s->private; struct nf_conn_tstamp *tstamp; s64 delta_time; tstamp = nf_conn_tstamp_find(ct); if (tstamp) { delta_time = st->time_now - tstamp->start; if (delta_time > 0) delta_time = div_s64(delta_time, NSEC_PER_SEC); else delta_time = 0; seq_printf(s, "delta-time=%llu ", (unsigned long long)delta_time); } return; } #else static inline void ct_show_delta_time(struct seq_file *s, const struct nf_conn *ct) { } #endif static const char* l3proto_name(u16 proto) { switch (proto) { case AF_INET: return "ipv4"; case AF_INET6: return "ipv6"; } return "unknown"; } static const char* l4proto_name(u16 proto) { switch (proto) { case 
IPPROTO_ICMP: return "icmp"; case IPPROTO_TCP: return "tcp"; case IPPROTO_UDP: return "udp"; case IPPROTO_GRE: return "gre"; case IPPROTO_SCTP: return "sctp"; case IPPROTO_UDPLITE: return "udplite"; case IPPROTO_ICMPV6: return "icmpv6"; } return "unknown"; } static void seq_print_acct(struct seq_file *s, const struct nf_conn *ct, int dir) { struct nf_conn_acct *acct; struct nf_conn_counter *counter; acct = nf_conn_acct_find(ct); if (!acct) return; counter = acct->counter; seq_printf(s, "packets=%llu bytes=%llu ", (unsigned long long)atomic64_read(&counter[dir].packets), (unsigned long long)atomic64_read(&counter[dir].bytes)); } /* return 0 on success, 1 in case of error */ static int ct_seq_show(struct seq_file *s, void *v) { struct nf_conntrack_tuple_hash *hash = v; struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); const struct nf_conntrack_l4proto *l4proto; struct net *net = seq_file_net(s); int ret = 0; WARN_ON(!ct); if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use))) return 0; /* load ->status after refcount increase */ smp_acquire__after_ctrl_dep(); if (nf_ct_should_gc(ct)) { nf_ct_kill(ct); goto release; } /* we only want to print DIR_ORIGINAL */ if (NF_CT_DIRECTION(hash)) goto release; if (!net_eq(nf_ct_net(ct), net)) goto release; l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct)); ret = -ENOSPC; seq_printf(s, "%-8s %u %-8s %u ", l3proto_name(nf_ct_l3num(ct)), nf_ct_l3num(ct), l4proto_name(l4proto->l4proto), nf_ct_protonum(ct)); if (!test_bit(IPS_OFFLOAD_BIT, &ct->status)) seq_printf(s, "%ld ", nf_ct_expires(ct) / HZ); if (l4proto->print_conntrack) l4proto->print_conntrack(s, ct); print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, l4proto); ct_show_zone(s, ct, NF_CT_ZONE_DIR_ORIG); if (seq_has_overflowed(s)) goto release; seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL); if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) seq_puts(s, "[UNREPLIED] "); print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, l4proto); ct_show_zone(s, ct, NF_CT_ZONE_DIR_REPL); seq_print_acct(s, ct, IP_CT_DIR_REPLY); if (test_bit(IPS_HW_OFFLOAD_BIT, &ct->status)) seq_puts(s, "[HW_OFFLOAD] "); else if (test_bit(IPS_OFFLOAD_BIT, &ct->status)) seq_puts(s, "[OFFLOAD] "); else if (test_bit(IPS_ASSURED_BIT, &ct->status)) seq_puts(s, "[ASSURED] "); if (seq_has_overflowed(s)) goto release; #if defined(CONFIG_NF_CONNTRACK_MARK) seq_printf(s, "mark=%u ", READ_ONCE(ct->mark)); #endif ct_show_secctx(s, ct); ct_show_zone(s, ct, NF_CT_DEFAULT_ZONE_DIR); ct_show_delta_time(s, ct); seq_printf(s, "use=%u\n", refcount_read(&ct->ct_general.use)); if (seq_has_overflowed(s)) goto release; ret = 0; release: nf_ct_put(ct); return ret; } static const struct seq_operations ct_seq_ops = { .start = ct_seq_start, .next = ct_seq_next, .stop = ct_seq_stop, .show = ct_seq_show }; static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos) { struct net *net = seq_file_net(seq); int cpu; if (*pos == 0) return SEQ_START_TOKEN; for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu + 1; return per_cpu_ptr(net->ct.stat, cpu); } return NULL; } static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct net *net = seq_file_net(seq); int cpu; for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu + 1; return per_cpu_ptr(net->ct.stat, cpu); } (*pos)++; return NULL; } static void ct_cpu_seq_stop(struct seq_file *seq, void *v) { } static int ct_cpu_seq_show(struct seq_file *seq, void *v) { struct net *net = seq_file_net(seq); const struct 
ip_conntrack_stat *st = v; unsigned int nr_conntracks; if (v == SEQ_START_TOKEN) { seq_puts(seq, "entries clashres found new invalid ignore delete chainlength insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n"); return 0; } nr_conntracks = nf_conntrack_count(net); seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x " "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n", nr_conntracks, st->clash_resolve, st->found, 0, st->invalid, 0, 0, st->chaintoolong, st->insert, st->insert_failed, st->drop, st->early_drop, st->error, st->expect_new, st->expect_create, st->expect_delete, st->search_restart ); return 0; } static const struct seq_operations ct_cpu_seq_ops = { .start = ct_cpu_seq_start, .next = ct_cpu_seq_next, .stop = ct_cpu_seq_stop, .show = ct_cpu_seq_show, }; static int nf_conntrack_standalone_init_proc(struct net *net) { struct proc_dir_entry *pde; kuid_t root_uid; kgid_t root_gid; pde = proc_create_net("nf_conntrack", 0440, net->proc_net, &ct_seq_ops, sizeof(struct ct_iter_state)); if (!pde) goto out_nf_conntrack; root_uid = make_kuid(net->user_ns, 0); root_gid = make_kgid(net->user_ns, 0); if (uid_valid(root_uid) && gid_valid(root_gid)) proc_set_user(pde, root_uid, root_gid); pde = proc_create_net("nf_conntrack", 0444, net->proc_net_stat, &ct_cpu_seq_ops, sizeof(struct seq_net_private)); if (!pde) goto out_stat_nf_conntrack; return 0; out_stat_nf_conntrack: remove_proc_entry("nf_conntrack", net->proc_net); out_nf_conntrack: return -ENOMEM; } static void nf_conntrack_standalone_fini_proc(struct net *net) { remove_proc_entry("nf_conntrack", net->proc_net_stat); remove_proc_entry("nf_conntrack", net->proc_net); } #else static int nf_conntrack_standalone_init_proc(struct net *net) { return 0; } static void nf_conntrack_standalone_fini_proc(struct net *net) { } #endif /* CONFIG_NF_CONNTRACK_PROCFS */ u32 nf_conntrack_count(const struct net *net) { const struct nf_conntrack_net *cnet = nf_ct_pernet(net); return atomic_read(&cnet->count); } EXPORT_SYMBOL_GPL(nf_conntrack_count); /* Sysctl support */ #ifdef CONFIG_SYSCTL /* size the user *wants to set */ static unsigned int nf_conntrack_htable_size_user __read_mostly; static int nf_conntrack_hash_sysctl(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret; /* module_param hashsize could have changed value */ nf_conntrack_htable_size_user = nf_conntrack_htable_size; ret = proc_dointvec(table, write, buffer, lenp, ppos); if (ret < 0 || !write) return ret; /* update ret, we might not be able to satisfy request */ ret = nf_conntrack_hash_resize(nf_conntrack_htable_size_user); /* update it to the actual value used by conntrack */ nf_conntrack_htable_size_user = nf_conntrack_htable_size; return ret; } static int nf_conntrack_log_invalid_sysctl(const struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { int ret, i; ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos); if (ret < 0 || !write) return ret; if (*(u8 *)table->data == 0) return 0; /* Load nf_log_syslog only if no logger is currently registered */ for (i = 0; i < NFPROTO_NUMPROTO; i++) { if (nf_log_is_registered(i)) return 0; } request_module("%s", "nf_log_syslog"); return 0; } static struct ctl_table_header *nf_ct_netfilter_header; enum nf_ct_sysctl_index { NF_SYSCTL_CT_MAX, NF_SYSCTL_CT_COUNT, NF_SYSCTL_CT_BUCKETS, NF_SYSCTL_CT_CHECKSUM, NF_SYSCTL_CT_LOG_INVALID, NF_SYSCTL_CT_EXPECT_MAX, NF_SYSCTL_CT_ACCT, #ifdef CONFIG_NF_CONNTRACK_EVENTS NF_SYSCTL_CT_EVENTS, 
#endif #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP NF_SYSCTL_CT_TIMESTAMP, #endif NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS, NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK, #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD, #endif NF_SYSCTL_CT_PROTO_TCP_LOOSE, NF_SYSCTL_CT_PROTO_TCP_LIBERAL, NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST, NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP, NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM, #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD, #endif NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP, NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6, #ifdef CONFIG_NF_CT_PROTO_SCTP NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT, #endif #ifdef CONFIG_NF_CT_PROTO_GRE NF_SYSCTL_CT_PROTO_TIMEOUT_GRE, NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM, #endif NF_SYSCTL_CT_LAST_SYSCTL, }; static struct ctl_table nf_ct_sysctl_table[] = { [NF_SYSCTL_CT_MAX] = { .procname = "nf_conntrack_max", .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_COUNT] = { .procname = "nf_conntrack_count", .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, [NF_SYSCTL_CT_BUCKETS] = { .procname = "nf_conntrack_buckets", .data = &nf_conntrack_htable_size_user, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = nf_conntrack_hash_sysctl, }, [NF_SYSCTL_CT_CHECKSUM] = { .procname = "nf_conntrack_checksum", .data = &init_net.ct.sysctl_checksum, .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, [NF_SYSCTL_CT_LOG_INVALID] = { .procname = "nf_conntrack_log_invalid", .data = &init_net.ct.sysctl_log_invalid, .maxlen = sizeof(u8), .mode = 0644, .proc_handler = nf_conntrack_log_invalid_sysctl, }, [NF_SYSCTL_CT_EXPECT_MAX] = { .procname = "nf_conntrack_expect_max", .data = &nf_ct_expect_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_ACCT] = { .procname = "nf_conntrack_acct", .data = &init_net.ct.sysctl_acct, .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, #ifdef CONFIG_NF_CONNTRACK_EVENTS [NF_SYSCTL_CT_EVENTS] = { .procname = "nf_conntrack_events", .data = &init_net.ct.sysctl_events, .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, #endif #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP [NF_SYSCTL_CT_TIMESTAMP] = { .procname = "nf_conntrack_timestamp", .data = &init_net.ct.sysctl_tstamp, .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, #endif [NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC] = { .procname = 
"nf_conntrack_generic_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_SENT] = { .procname = "nf_conntrack_tcp_timeout_syn_sent", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_SYN_RECV] = { .procname = "nf_conntrack_tcp_timeout_syn_recv", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ESTABLISHED] = { .procname = "nf_conntrack_tcp_timeout_established", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_FIN_WAIT] = { .procname = "nf_conntrack_tcp_timeout_fin_wait", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE_WAIT] = { .procname = "nf_conntrack_tcp_timeout_close_wait", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_LAST_ACK] = { .procname = "nf_conntrack_tcp_timeout_last_ack", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_TIME_WAIT] = { .procname = "nf_conntrack_tcp_timeout_time_wait", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_CLOSE] = { .procname = "nf_conntrack_tcp_timeout_close", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_RETRANS] = { .procname = "nf_conntrack_tcp_timeout_max_retrans", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_UNACK] = { .procname = "nf_conntrack_tcp_timeout_unacknowledged", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) [NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD] = { .procname = "nf_flowtable_tcp_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #endif [NF_SYSCTL_CT_PROTO_TCP_LOOSE] = { .procname = "nf_conntrack_tcp_loose", .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, [NF_SYSCTL_CT_PROTO_TCP_LIBERAL] = { .procname = "nf_conntrack_tcp_be_liberal", .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, [NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = { .procname = "nf_conntrack_tcp_ignore_invalid_rst", .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, [NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS] = { .procname = "nf_conntrack_tcp_max_retrans", .maxlen = sizeof(u8), .mode = 0644, .proc_handler = proc_dou8vec_minmax, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP] = { .procname = "nf_conntrack_udp_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM] = { .procname = "nf_conntrack_udp_timeout_stream", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) [NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD] = { .procname = "nf_flowtable_udp_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #endif [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP] = { 
.procname = "nf_conntrack_icmp_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6] = { .procname = "nf_conntrack_icmpv6_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #ifdef CONFIG_NF_CT_PROTO_SCTP [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_CLOSED] = { .procname = "nf_conntrack_sctp_timeout_closed", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_WAIT] = { .procname = "nf_conntrack_sctp_timeout_cookie_wait", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_COOKIE_ECHOED] = { .procname = "nf_conntrack_sctp_timeout_cookie_echoed", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ESTABLISHED] = { .procname = "nf_conntrack_sctp_timeout_established", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .procname = "nf_conntrack_sctp_timeout_shutdown_sent", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .procname = "nf_conntrack_sctp_timeout_shutdown_recd", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .procname = "nf_conntrack_sctp_timeout_heartbeat_sent", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #endif #ifdef CONFIG_NF_CT_PROTO_GRE [NF_SYSCTL_CT_PROTO_TIMEOUT_GRE] = { .procname = "nf_conntrack_gre_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, [NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM] = { .procname = "nf_conntrack_gre_timeout_stream", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #endif }; static struct ctl_table nf_ct_netfilter_table[] = { { .procname = "nf_conntrack_max", .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_INT_MAX, }, }; static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net, struct ctl_table *table) { struct nf_tcp_net *tn = nf_tcp_pernet(net); #define XASSIGN(XNAME, tn) \ table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_ ## XNAME].data = \ &(tn)->timeouts[TCP_CONNTRACK_ ## XNAME] XASSIGN(SYN_SENT, tn); XASSIGN(SYN_RECV, tn); XASSIGN(ESTABLISHED, tn); XASSIGN(FIN_WAIT, tn); XASSIGN(CLOSE_WAIT, tn); XASSIGN(LAST_ACK, tn); XASSIGN(TIME_WAIT, tn); XASSIGN(CLOSE, tn); XASSIGN(RETRANS, tn); XASSIGN(UNACK, tn); #undef XASSIGN #define XASSIGN(XNAME, rval) \ table[NF_SYSCTL_CT_PROTO_TCP_ ## XNAME].data = (rval) XASSIGN(LOOSE, &tn->tcp_loose); XASSIGN(LIBERAL, &tn->tcp_be_liberal); XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans); XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst); #undef XASSIGN #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) table[NF_SYSCTL_CT_PROTO_TIMEOUT_TCP_OFFLOAD].data = &tn->offload_timeout; #endif } static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net, struct ctl_table *table) { #ifdef CONFIG_NF_CT_PROTO_SCTP struct 
nf_sctp_net *sn = nf_sctp_pernet(net); #define XASSIGN(XNAME, sn) \ table[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_ ## XNAME].data = \ &(sn)->timeouts[SCTP_CONNTRACK_ ## XNAME] XASSIGN(CLOSED, sn); XASSIGN(COOKIE_WAIT, sn); XASSIGN(COOKIE_ECHOED, sn); XASSIGN(ESTABLISHED, sn); XASSIGN(SHUTDOWN_SENT, sn); XASSIGN(SHUTDOWN_RECD, sn); XASSIGN(SHUTDOWN_ACK_SENT, sn); XASSIGN(HEARTBEAT_SENT, sn); #undef XASSIGN #endif } static void nf_conntrack_standalone_init_gre_sysctl(struct net *net, struct ctl_table *table) { #ifdef CONFIG_NF_CT_PROTO_GRE struct nf_gre_net *gn = nf_gre_pernet(net); table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE].data = &gn->timeouts[GRE_CT_UNREPLIED]; table[NF_SYSCTL_CT_PROTO_TIMEOUT_GRE_STREAM].data = &gn->timeouts[GRE_CT_REPLIED]; #endif } static int nf_conntrack_standalone_init_sysctl(struct net *net) { struct nf_conntrack_net *cnet = nf_ct_pernet(net); struct nf_udp_net *un = nf_udp_pernet(net); struct ctl_table *table; BUILD_BUG_ON(ARRAY_SIZE(nf_ct_sysctl_table) != NF_SYSCTL_CT_LAST_SYSCTL); table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table), GFP_KERNEL); if (!table) return -ENOMEM; table[NF_SYSCTL_CT_COUNT].data = &cnet->count; table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; #ifdef CONFIG_NF_CONNTRACK_EVENTS table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; #endif #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp; #endif table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMPV6].data = &nf_icmpv6_pernet(net)->timeout; table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP].data = &un->timeouts[UDP_CT_UNREPLIED]; table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_STREAM].data = &un->timeouts[UDP_CT_REPLIED]; #if IS_ENABLED(CONFIG_NF_FLOW_TABLE) table[NF_SYSCTL_CT_PROTO_TIMEOUT_UDP_OFFLOAD].data = &un->offload_timeout; #endif nf_conntrack_standalone_init_tcp_sysctl(net, table); nf_conntrack_standalone_init_sctp_sysctl(net, table); nf_conntrack_standalone_init_gre_sysctl(net, table); /* Don't allow non-init_net ns to alter global sysctls */ if (!net_eq(&init_net, net)) { table[NF_SYSCTL_CT_MAX].mode = 0444; table[NF_SYSCTL_CT_EXPECT_MAX].mode = 0444; table[NF_SYSCTL_CT_BUCKETS].mode = 0444; } cnet->sysctl_header = register_net_sysctl_sz(net, "net/netfilter", table, ARRAY_SIZE(nf_ct_sysctl_table)); if (!cnet->sysctl_header) goto out_unregister_netfilter; return 0; out_unregister_netfilter: kfree(table); return -ENOMEM; } static void nf_conntrack_standalone_fini_sysctl(struct net *net) { struct nf_conntrack_net *cnet = nf_ct_pernet(net); const struct ctl_table *table; table = cnet->sysctl_header->ctl_table_arg; unregister_net_sysctl_table(cnet->sysctl_header); kfree(table); } #else static int nf_conntrack_standalone_init_sysctl(struct net *net) { return 0; } static void nf_conntrack_standalone_fini_sysctl(struct net *net) { } #endif /* CONFIG_SYSCTL */ static void nf_conntrack_fini_net(struct net *net) { if (enable_hooks) nf_ct_netns_put(net, NFPROTO_INET); nf_conntrack_standalone_fini_proc(net); nf_conntrack_standalone_fini_sysctl(net); } static int nf_conntrack_pernet_init(struct net *net) { int ret; net->ct.sysctl_checksum = 1; ret = nf_conntrack_standalone_init_sysctl(net); if (ret < 0) return ret; ret = nf_conntrack_standalone_init_proc(net); if (ret < 0) goto out_proc; ret = 
nf_conntrack_init_net(net); if (ret < 0) goto out_init_net; if (enable_hooks) { ret = nf_ct_netns_get(net, NFPROTO_INET); if (ret < 0) goto out_hooks; } return 0; out_hooks: nf_conntrack_cleanup_net(net); out_init_net: nf_conntrack_standalone_fini_proc(net); out_proc: nf_conntrack_standalone_fini_sysctl(net); return ret; } static void nf_conntrack_pernet_exit(struct list_head *net_exit_list) { struct net *net; list_for_each_entry(net, net_exit_list, exit_list) nf_conntrack_fini_net(net); nf_conntrack_cleanup_net_list(net_exit_list); } static struct pernet_operations nf_conntrack_net_ops = { .init = nf_conntrack_pernet_init, .exit_batch = nf_conntrack_pernet_exit, .id = &nf_conntrack_net_id, .size = sizeof(struct nf_conntrack_net), }; static int __init nf_conntrack_standalone_init(void) { int ret = nf_conntrack_init_start(); if (ret < 0) goto out_start; BUILD_BUG_ON(NFCT_INFOMASK <= IP_CT_NUMBER); #ifdef CONFIG_SYSCTL nf_ct_netfilter_header = register_net_sysctl(&init_net, "net", nf_ct_netfilter_table); if (!nf_ct_netfilter_header) { pr_err("nf_conntrack: can't register to sysctl.\n"); ret = -ENOMEM; goto out_sysctl; } nf_conntrack_htable_size_user = nf_conntrack_htable_size; #endif nf_conntrack_init_end(); ret = register_pernet_subsys(&nf_conntrack_net_ops); if (ret < 0) goto out_pernet; return 0; out_pernet: #ifdef CONFIG_SYSCTL unregister_net_sysctl_table(nf_ct_netfilter_header); out_sysctl: #endif nf_conntrack_cleanup_end(); out_start: return ret; } static void __exit nf_conntrack_standalone_fini(void) { nf_conntrack_cleanup_start(); unregister_pernet_subsys(&nf_conntrack_net_ops); #ifdef CONFIG_SYSCTL unregister_net_sysctl_table(nf_ct_netfilter_header); #endif nf_conntrack_cleanup_end(); } module_init(nf_conntrack_standalone_init); module_exit(nf_conntrack_standalone_fini); |
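As a side note on the sysctl handling above: nf_conntrack_standalone_init_sysctl() follows the usual per-netns idiom of duplicating a static template table, repointing each .data member at per-namespace storage, and only then registering the copy. A stripped-down, hypothetical sketch of that idiom follows; the my_* names are invented and only a single entry is shown.

/*
 * Hypothetical distillation of the per-netns sysctl pattern used by
 * nf_conntrack_standalone_init_sysctl(): one static template, one
 * kmemdup()'d copy per namespace with .data rewritten.
 */
struct my_pernet {
        unsigned int timeout;
        struct ctl_table_header *sysctl_header;
};

static struct ctl_table my_template[] = {
        {
                .procname       = "my_timeout",
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
};

static int my_init_sysctl(struct net *net, struct my_pernet *pn)
{
        struct ctl_table *table;

        table = kmemdup(my_template, sizeof(my_template), GFP_KERNEL);
        if (!table)
                return -ENOMEM;

        /* Point the copy at this namespace's storage, not at a global. */
        table[0].data = &pn->timeout;

        /* Keep the entry read-only outside the initial namespace, as above. */
        if (!net_eq(&init_net, net))
                table[0].mode = 0444;

        pn->sysctl_header = register_net_sysctl_sz(net, "net/netfilter", table,
                                                    ARRAY_SIZE(my_template));
        if (!pn->sysctl_header) {
                kfree(table);
                return -ENOMEM;
        }
        return 0;
}

Teardown mirrors nf_conntrack_standalone_fini_sysctl(): fetch ctl_table_arg from the header, unregister it, then kfree() the duplicated table.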
| 4 3 3 4 1 3 4 6 6 2 1 2 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 | // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2008-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */ /* Kernel module implementing an IP set type: the list:set type */ #include <linux/module.h> #include <linux/ip.h> #include <linux/rculist.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_list.h> #define IPSET_TYPE_REV_MIN 0 /* 1 Counters support added */ /* 2 Comments support added */ #define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>"); IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_list:set"); /* Member elements */ struct set_elem { struct rcu_head rcu; struct list_head list; struct ip_set *set; /* Sigh, in order to cleanup 
reference */ ip_set_id_t id; } __aligned(__alignof__(u64)); struct set_adt_elem { ip_set_id_t id; ip_set_id_t refid; int before; }; /* Type structure */ struct list_set { u32 size; /* size of set list array */ struct timer_list gc; /* garbage collection */ struct ip_set *set; /* attached to this ip_set */ struct net *net; /* namespace */ struct list_head members; /* the set members */ }; static int list_set_ktest(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt, const struct ip_set_ext *ext) { struct list_set *map = set->data; struct ip_set_ext *mext = &opt->ext; struct set_elem *e; u32 flags = opt->cmdflags; int ret; /* Don't lookup sub-counters at all */ opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE; list_for_each_entry_rcu(e, &map->members, list) { ret = ip_set_test(e->id, skb, par, opt); if (ret <= 0) continue; if (ip_set_match_extensions(set, ext, mext, flags, e)) return 1; } return 0; } static int list_set_kadd(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt, const struct ip_set_ext *ext) { struct list_set *map = set->data; struct set_elem *e; int ret; list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; ret = ip_set_add(e->id, skb, par, opt); if (ret == 0) return ret; } return 0; } static int list_set_kdel(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt, const struct ip_set_ext *ext) { struct list_set *map = set->data; struct set_elem *e; int ret; list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; ret = ip_set_del(e->id, skb, par, opt); if (ret == 0) return ret; } return 0; } static int list_set_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); int ret = -EINVAL; rcu_read_lock(); switch (adt) { case IPSET_TEST: ret = list_set_ktest(set, skb, par, opt, &ext); break; case IPSET_ADD: ret = list_set_kadd(set, skb, par, opt, &ext); break; case IPSET_DEL: ret = list_set_kdel(set, skb, par, opt, &ext); break; default: break; } rcu_read_unlock(); return ret; } /* Userspace interfaces: we are protected by the nfnl mutex */ static void __list_set_del_rcu(struct rcu_head * rcu) { struct set_elem *e = container_of(rcu, struct set_elem, rcu); struct ip_set *set = e->set; ip_set_ext_destroy(set, e); kfree(e); } static void list_set_del(struct ip_set *set, struct set_elem *e) { struct list_set *map = set->data; set->elements--; list_del_rcu(&e->list); ip_set_put_byindex(map->net, e->id); call_rcu(&e->rcu, __list_set_del_rcu); } static void list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old) { struct list_set *map = set->data; list_replace_rcu(&old->list, &e->list); ip_set_put_byindex(map->net, old->id); call_rcu(&old->rcu, __list_set_del_rcu); } static void set_cleanup_entries(struct ip_set *set) { struct list_set *map = set->data; struct set_elem *e, *n; list_for_each_entry_safe(e, n, &map->members, list) if (ip_set_timeout_expired(ext_timeout(e, set))) list_set_del(set, e); } static int list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext, struct ip_set_ext *mext, 
u32 flags) { struct list_set *map = set->data; struct set_adt_elem *d = value; struct set_elem *e, *next, *prev = NULL; int ret = 0; rcu_read_lock(); list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; else if (e->id != d->id) { prev = e; continue; } if (d->before == 0) { ret = 1; goto out; } else if (d->before > 0) { next = list_next_entry(e, list); ret = !list_is_last(&e->list, &map->members) && next->id == d->refid; } else { ret = prev && prev->id == d->refid; } goto out; } out: rcu_read_unlock(); return ret; } static void list_set_init_extensions(struct ip_set *set, const struct ip_set_ext *ext, struct set_elem *e) { if (SET_WITH_COUNTER(set)) ip_set_init_counter(ext_counter(e, set), ext); if (SET_WITH_COMMENT(set)) ip_set_init_comment(set, ext_comment(e, set), ext); if (SET_WITH_SKBINFO(set)) ip_set_init_skbinfo(ext_skbinfo(e, set), ext); /* Update timeout last */ if (SET_WITH_TIMEOUT(set)) ip_set_timeout_set(ext_timeout(e, set), ext->timeout); } static int list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags) { struct list_set *map = set->data; struct set_adt_elem *d = value; struct set_elem *e, *n, *prev, *next; bool flag_exist = flags & IPSET_FLAG_EXIST; /* Find where to add the new entry */ n = prev = next = NULL; list_for_each_entry_rcu(e, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; else if (d->id == e->id) n = e; else if (d->before == 0 || e->id != d->refid) continue; else if (d->before > 0) next = e; else prev = e; } /* If before/after is used on an empty set */ if ((d->before > 0 && !next) || (d->before < 0 && !prev)) return -IPSET_ERR_REF_EXIST; /* Re-add already existing element */ if (n) { if (!flag_exist) return -IPSET_ERR_EXIST; /* Update extensions */ ip_set_ext_destroy(set, n); list_set_init_extensions(set, ext, n); /* Set is already added to the list */ ip_set_put_byindex(map->net, d->id); return 0; } /* Add new entry */ if (d->before == 0) { /* Append */ n = list_empty(&map->members) ? NULL : list_last_entry(&map->members, struct set_elem, list); } else if (d->before > 0) { /* Insert after next element */ if (!list_is_last(&next->list, &map->members)) n = list_next_entry(next, list); } else { /* Insert before prev element */ if (prev->list.prev != &map->members) n = list_prev_entry(prev, list); } /* Can we replace a timed out entry? 
*/ if (n && !(SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(n, set)))) n = NULL; e = kzalloc(set->dsize, GFP_ATOMIC); if (!e) return -ENOMEM; e->id = d->id; e->set = set; INIT_LIST_HEAD(&e->list); list_set_init_extensions(set, ext, e); if (n) list_set_replace(set, e, n); else if (next) list_add_tail_rcu(&e->list, &next->list); else if (prev) list_add_rcu(&e->list, &prev->list); else list_add_tail_rcu(&e->list, &map->members); set->elements++; return 0; } static int list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext, struct ip_set_ext *mext, u32 flags) { struct list_set *map = set->data; struct set_adt_elem *d = value; struct set_elem *e, *n, *next, *prev = NULL; list_for_each_entry_safe(e, n, &map->members, list) { if (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set))) continue; else if (e->id != d->id) { prev = e; continue; } if (d->before > 0) { next = list_next_entry(e, list); if (list_is_last(&e->list, &map->members) || next->id != d->refid) return -IPSET_ERR_REF_EXIST; } else if (d->before < 0) { if (!prev || prev->id != d->refid) return -IPSET_ERR_REF_EXIST; } list_set_del(set, e); return 0; } return d->before != 0 ? -IPSET_ERR_REF_EXIST : -IPSET_ERR_EXIST; } static int list_set_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct list_set *map = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct set_adt_elem e = { .refid = IPSET_INVALID_ID }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); struct ip_set *s; int ret = 0; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_NAME] || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s); if (e.id == IPSET_INVALID_ID) return -IPSET_ERR_NAME; /* "Loop detection" */ if (s->type->features & IPSET_TYPE_NAME) { ret = -IPSET_ERR_LOOP; goto finish; } if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); e.before = f & IPSET_FLAG_BEFORE; } if (e.before && !tb[IPSET_ATTR_NAMEREF]) { ret = -IPSET_ERR_BEFORE; goto finish; } if (tb[IPSET_ATTR_NAMEREF]) { e.refid = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAMEREF]), &s); if (e.refid == IPSET_INVALID_ID) { ret = -IPSET_ERR_NAMEREF; goto finish; } if (!e.before) e.before = -1; } if (adt != IPSET_TEST && SET_WITH_TIMEOUT(set)) set_cleanup_entries(set); ret = adtfn(set, &e, &ext, &ext, flags); finish: if (e.refid != IPSET_INVALID_ID) ip_set_put_byindex(map->net, e.refid); if (adt != IPSET_ADD || ret) ip_set_put_byindex(map->net, e.id); return ip_set_eexist(ret, flags) ? 
0 : ret; } static void list_set_flush(struct ip_set *set) { struct list_set *map = set->data; struct set_elem *e, *n; list_for_each_entry_safe(e, n, &map->members, list) list_set_del(set, e); set->elements = 0; set->ext_size = 0; } static void list_set_destroy(struct ip_set *set) { struct list_set *map = set->data; WARN_ON_ONCE(!list_empty(&map->members)); kfree(map); set->data = NULL; } /* Calculate the actual memory size of the set data */ static size_t list_set_memsize(const struct list_set *map, size_t dsize) { struct set_elem *e; u32 n = 0; rcu_read_lock(); list_for_each_entry_rcu(e, &map->members, list) n++; rcu_read_unlock(); return (sizeof(*map) + n * dsize); } static int list_set_head(struct ip_set *set, struct sk_buff *skb) { const struct list_set *map = set->data; struct nlattr *nested; size_t memsize = list_set_memsize(map, set->dsize) + set->ext_size; nested = nla_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) || nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) || nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements))) goto nla_put_failure; if (unlikely(ip_set_put_flags(skb, set))) goto nla_put_failure; nla_nest_end(skb, nested); return 0; nla_put_failure: return -EMSGSIZE; } static int list_set_list(const struct ip_set *set, struct sk_buff *skb, struct netlink_callback *cb) { const struct list_set *map = set->data; struct nlattr *atd, *nested; u32 i = 0, first = cb->args[IPSET_CB_ARG0]; char name[IPSET_MAXNAMELEN]; struct set_elem *e; int ret = 0; atd = nla_nest_start(skb, IPSET_ATTR_ADT); if (!atd) return -EMSGSIZE; rcu_read_lock(); list_for_each_entry_rcu(e, &map->members, list) { if (i < first || (SET_WITH_TIMEOUT(set) && ip_set_timeout_expired(ext_timeout(e, set)))) { i++; continue; } nested = nla_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; ip_set_name_byindex(map->net, e->id, name); if (nla_put_string(skb, IPSET_ATTR_NAME, name)) goto nla_put_failure; if (ip_set_put_extensions(skb, set, e, true)) goto nla_put_failure; nla_nest_end(skb, nested); i++; } nla_nest_end(skb, atd); /* Set listing finished */ cb->args[IPSET_CB_ARG0] = 0; goto out; nla_put_failure: nla_nest_cancel(skb, nested); if (unlikely(i == first)) { nla_nest_cancel(skb, atd); cb->args[IPSET_CB_ARG0] = 0; ret = -EMSGSIZE; } else { cb->args[IPSET_CB_ARG0] = i; nla_nest_end(skb, atd); } out: rcu_read_unlock(); return ret; } static bool list_set_same_set(const struct ip_set *a, const struct ip_set *b) { const struct list_set *x = a->data; const struct list_set *y = b->data; return x->size == y->size && a->timeout == b->timeout && a->extensions == b->extensions; } static void list_set_cancel_gc(struct ip_set *set) { struct list_set *map = set->data; if (SET_WITH_TIMEOUT(set)) timer_shutdown_sync(&map->gc); /* Flush list to drop references to other ipsets */ list_set_flush(set); } static const struct ip_set_type_variant set_variant = { .kadt = list_set_kadt, .uadt = list_set_uadt, .adt = { [IPSET_ADD] = list_set_uadd, [IPSET_DEL] = list_set_udel, [IPSET_TEST] = list_set_utest, }, .destroy = list_set_destroy, .flush = list_set_flush, .head = list_set_head, .list = list_set_list, .same_set = list_set_same_set, .cancel_gc = list_set_cancel_gc, }; static void list_set_gc(struct timer_list *t) { struct list_set *map = timer_container_of(map, t, gc); struct ip_set *set = map->set; spin_lock_bh(&set->lock); set_cleanup_entries(set); 
spin_unlock_bh(&set->lock); map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; add_timer(&map->gc); } static void list_set_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t)) { struct list_set *map = set->data; timer_setup(&map->gc, gc, 0); mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ); } /* Create list:set type of sets */ static bool init_list_set(struct net *net, struct ip_set *set, u32 size) { struct list_set *map; map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) return false; map->size = size; map->net = net; map->set = set; INIT_LIST_HEAD(&map->members); set->data = map; return true; } static struct lock_class_key list_set_lockdep_key; static int list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[], u32 flags) { u32 size = IP_SET_LIST_DEFAULT_SIZE; if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_SIZE]) size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]); if (size < IP_SET_LIST_MIN_SIZE) size = IP_SET_LIST_MIN_SIZE; lockdep_set_class(&set->lock, &list_set_lockdep_key); set->variant = &set_variant; set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem), __alignof__(struct set_elem)); if (!init_list_set(net, set, size)) return -ENOMEM; if (tb[IPSET_ATTR_TIMEOUT]) { set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); list_set_gc_init(set, list_set_gc); } return 0; } static struct ip_set_type list_set_type __read_mostly = { .name = "list:set", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST, .dimension = IPSET_DIM_ONE, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create = list_set_create, .create_policy = { [IPSET_ATTR_SIZE] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_NAME] = { .type = NLA_STRING, .len = IPSET_MAXNAMELEN }, [IPSET_ATTR_NAMEREF] = { .type = NLA_STRING, .len = IPSET_MAXNAMELEN }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init list_set_init(void) { return ip_set_type_register(&list_set_type); } static void __exit list_set_fini(void) { rcu_barrier(); ip_set_type_unregister(&list_set_type); } module_init(list_set_init); module_exit(list_set_fini); |
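/*
 * Not part of ip_set_list_set.c: a distilled sketch of the RCU pattern the
 * list:set type relies on above (list_set_del() plus __list_set_del_rcu()).
 * Readers walk the member list under rcu_read_lock(); the writer unlinks an
 * element with list_del_rcu() and defers the kfree() through call_rcu(), so
 * concurrent readers never touch freed memory.  The names ("demo_elem",
 * "demo_head") are hypothetical.
 */
#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_elem {
	struct list_head list;
	struct rcu_head rcu;
	int id;
};

static LIST_HEAD(demo_head);

static void demo_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct demo_elem, rcu));
}

/* Writer side: callers are expected to serialize writers externally. */
static void demo_del(struct demo_elem *e)
{
	list_del_rcu(&e->list);
	call_rcu(&e->rcu, demo_free_rcu);
}

/* Reader side: safe against a concurrent demo_del(). */
static bool demo_lookup(int id)
{
	struct demo_elem *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_head, list) {
		if (e->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}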
| 4 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 | // SPDX-License-Identifier: GPL-2.0-or-later /* * xpress_decompress.c - A decompressor for the XPRESS compression format * (Huffman variant), which can be used in "System Compressed" files. This is * based on the code from wimlib. * * Copyright (C) 2015 Eric Biggers */ #include "decompress_common.h" #include "lib.h" #define XPRESS_NUM_SYMBOLS 512 #define XPRESS_MAX_CODEWORD_LEN 15 #define XPRESS_MIN_MATCH_LEN 3 /* This value is chosen for fast decompression. */ #define XPRESS_TABLEBITS 12 /* Reusable heap-allocated memory for XPRESS decompression */ struct xpress_decompressor { /* The Huffman decoding table */ u16 decode_table[(1 << XPRESS_TABLEBITS) + 2 * XPRESS_NUM_SYMBOLS]; /* An array that maps symbols to codeword lengths */ u8 lens[XPRESS_NUM_SYMBOLS]; /* Temporary space for make_huffman_decode_table() */ u16 working_space[2 * (1 + XPRESS_MAX_CODEWORD_LEN) + XPRESS_NUM_SYMBOLS]; }; /* * xpress_allocate_decompressor - Allocate an XPRESS decompressor * * Return the pointer to the decompressor on success, or return NULL and set * errno on failure. */ struct xpress_decompressor *xpress_allocate_decompressor(void) { return kmalloc(sizeof(struct xpress_decompressor), GFP_NOFS); } /* * xpress_decompress - Decompress a buffer of XPRESS-compressed data * * @decompressor: A decompressor that was allocated with * xpress_allocate_decompressor() * @compressed_data: The buffer of data to decompress * @compressed_size: Number of bytes of compressed data * @uncompressed_data: The buffer in which to store the decompressed data * @uncompressed_size: The number of bytes the data decompresses into * * Return 0 on success, or return -1 and set errno on failure. */ int xpress_decompress(struct xpress_decompressor *decompressor, const void *compressed_data, size_t compressed_size, void *uncompressed_data, size_t uncompressed_size) { struct xpress_decompressor *d = decompressor; const u8 * const in_begin = compressed_data; u8 * const out_begin = uncompressed_data; u8 *out_next = out_begin; u8 * const out_end = out_begin + uncompressed_size; struct input_bitstream is; u32 i; /* Read the Huffman codeword lengths. */ if (compressed_size < XPRESS_NUM_SYMBOLS / 2) goto invalid; for (i = 0; i < XPRESS_NUM_SYMBOLS / 2; i++) { d->lens[i*2 + 0] = in_begin[i] & 0xF; d->lens[i*2 + 1] = in_begin[i] >> 4; } /* Build a decoding table for the Huffman code. */ if (make_huffman_decode_table(d->decode_table, XPRESS_NUM_SYMBOLS, XPRESS_TABLEBITS, d->lens, XPRESS_MAX_CODEWORD_LEN, d->working_space)) goto invalid; /* Decode the matches and literals. 
*/ init_input_bitstream(&is, in_begin + XPRESS_NUM_SYMBOLS / 2, compressed_size - XPRESS_NUM_SYMBOLS / 2); while (out_next != out_end) { u32 sym; u32 log2_offset; u32 length; u32 offset; sym = read_huffsym(&is, d->decode_table, XPRESS_TABLEBITS, XPRESS_MAX_CODEWORD_LEN); if (sym < 256) { /* Literal */ *out_next++ = sym; } else { /* Match */ length = sym & 0xf; log2_offset = (sym >> 4) & 0xf; bitstream_ensure_bits(&is, 16); offset = ((u32)1 << log2_offset) | bitstream_pop_bits(&is, log2_offset); if (length == 0xf) { length += bitstream_read_byte(&is); if (length == 0xf + 0xff) length = bitstream_read_u16(&is); } length += XPRESS_MIN_MATCH_LEN; if (offset > (size_t)(out_next - out_begin)) goto invalid; if (length > (size_t)(out_end - out_next)) goto invalid; out_next = lz_copy(out_next, length, offset, out_end, XPRESS_MIN_MATCH_LEN); } } return 0; invalid: return -1; } /* * xpress_free_decompressor - Free an XPRESS decompressor * * @decompressor: A decompressor that was allocated with * xpress_allocate_decompressor(), or NULL. */ void xpress_free_decompressor(struct xpress_decompressor *decompressor) { kfree(decompressor); } |
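/*
 * Illustrative userspace sketch, not part of xpress_decompress.c: the first
 * XPRESS_NUM_SYMBOLS / 2 = 256 bytes of a compressed block pack two 4-bit
 * Huffman codeword lengths per byte, exactly as the loop above unpacks them
 * into d->lens[].
 */
#include <stdint.h>
#include <stddef.h>

#define XPRESS_NUM_SYMBOLS 512

static void xpress_unpack_lens(const uint8_t *in,
			       uint8_t lens[XPRESS_NUM_SYMBOLS])
{
	size_t i;

	for (i = 0; i < XPRESS_NUM_SYMBOLS / 2; i++) {
		lens[i * 2 + 0] = in[i] & 0xF;	/* low nibble: even symbol */
		lens[i * 2 + 1] = in[i] >> 4;	/* high nibble: odd symbol */
	}
}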
| 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef _LINUX_KDEV_T_H #define _LINUX_KDEV_T_H #include <uapi/linux/kdev_t.h> #define MINORBITS 20 #define MINORMASK ((1U << MINORBITS) - 1) #define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS)) #define MINOR(dev) ((unsigned int) ((dev) & MINORMASK)) #define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi)) #define print_dev_t(buffer, dev) \ sprintf((buffer), "%u:%u\n", MAJOR(dev), MINOR(dev)) #define format_dev_t(buffer, dev) \ ({ \ sprintf(buffer, "%u:%u", MAJOR(dev), MINOR(dev)); \ buffer; \ }) /* acceptable for old filesystems */ static __always_inline bool old_valid_dev(dev_t dev) { return MAJOR(dev) < 256 && MINOR(dev) < 256; } static __always_inline u16 old_encode_dev(dev_t dev) { return (MAJOR(dev) << 8) | MINOR(dev); } static __always_inline dev_t old_decode_dev(u16 val) { return MKDEV((val >> 8) & 255, val & 255); } static __always_inline u32 new_encode_dev(dev_t dev) { unsigned major = MAJOR(dev); unsigned minor = MINOR(dev); return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); } static __always_inline dev_t new_decode_dev(u32 dev) { unsigned major = (dev & 0xfff00) >> 8; unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); return MKDEV(major, minor); } static __always_inline u64 huge_encode_dev(dev_t dev) { return new_encode_dev(dev); } static __always_inline dev_t huge_decode_dev(u64 dev) { return new_decode_dev(dev); } static __always_inline int sysv_valid_dev(dev_t dev) { return MAJOR(dev) < (1<<14) && MINOR(dev) < (1<<18); } static __always_inline u32 sysv_encode_dev(dev_t dev) { return MINOR(dev) | (MAJOR(dev) << 18); } static __always_inline unsigned sysv_major(u32 dev) { return (dev >> 18) & 0x3fff; } static __always_inline unsigned sysv_minor(u32 dev) { return dev & 0x3ffff; } #endif |
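/*
 * Illustrative only, not part of kdev_t.h: a userspace round-trip of the
 * "new" 32-bit encoding above.  The low 8 bits of the minor sit next to the
 * 12-bit major and the remaining minor bits are shifted above them, so old
 * 16-bit (8:8) dev_t values keep their bit pattern unchanged.
 */
#include <assert.h>
#include <stdio.h>

#define MINORBITS	20
#define MINORMASK	((1U << MINORBITS) - 1)
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))

static unsigned int demo_new_encode_dev(unsigned int dev)
{
	unsigned int major = dev >> MINORBITS;
	unsigned int minor = dev & MINORMASK;

	return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
}

static unsigned int demo_new_decode_dev(unsigned int dev)
{
	unsigned int major = (dev & 0xfff00) >> 8;
	unsigned int minor = (dev & 0xff) | ((dev >> 12) & 0xfff00);

	return MKDEV(major, minor);
}

int main(void)
{
	unsigned int dev = MKDEV(259, 65539);	/* large major and minor */

	assert(demo_new_decode_dev(demo_new_encode_dev(dev)) == dev);
	printf("encoded: 0x%x\n", demo_new_encode_dev(dev));
	return 0;
}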
| 9 5 7 9 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 | /* SPDX-License-Identifier: GPL-2.0 */ /* * linux/include/linux/relay.h * * Copyright (C) 2002, 2003 - Tom Zanussi (zanussi@us.ibm.com), IBM Corp * Copyright (C) 1999, 2000, 2001, 2002 - Karim Yaghmour (karim@opersys.com) * * CONFIG_RELAY definitions and declarations */ #ifndef _LINUX_RELAY_H #define _LINUX_RELAY_H #include <linux/types.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/wait.h> #include <linux/list.h> #include <linux/irq_work.h> #include <linux/bug.h> #include <linux/fs.h> #include <linux/poll.h> #include <linux/kref.h> #include <linux/percpu.h> /* * Tracks changes to rchan/rchan_buf structs */ #define RELAYFS_CHANNEL_VERSION 7 /* * Relay buffer statistics */ enum { RELAY_STATS_BUF_FULL = (1 << 0), RELAY_STATS_WRT_BIG = (1 << 1), RELAY_STATS_LAST = RELAY_STATS_WRT_BIG, }; struct rchan_buf_stats { unsigned int full_count; /* counter for buffer full */ unsigned int big_count; /* counter for too big to write */ }; /* * Per-cpu relay channel buffer */ struct rchan_buf { void *start; /* start of channel buffer */ void *data; /* start of current sub-buffer */ size_t offset; /* current offset into sub-buffer */ size_t subbufs_produced; /* count of sub-buffers produced */ size_t subbufs_consumed; /* count of sub-buffers consumed */ struct rchan *chan; /* associated channel */ wait_queue_head_t read_wait; /* reader wait queue */ struct irq_work wakeup_work; /* reader wakeup */ struct dentry *dentry; /* channel file dentry */ struct kref kref; /* channel buffer refcount */ struct rchan_buf_stats stats; /* buffer stats */ struct page **page_array; /* array of current buffer pages */ unsigned int page_count; /* number of current buffer pages */ unsigned int finalized; /* buffer has been finalized */ size_t *padding; /* padding counts per sub-buffer */ size_t bytes_consumed; /* bytes consumed in cur read subbuf */ size_t early_bytes; /* bytes consumed before VFS inited */ unsigned int cpu; /* this buf's cpu */ } ____cacheline_aligned; /* * Relay channel data structure */ struct rchan { u32 version; /* the version of this struct */ size_t subbuf_size; /* sub-buffer size */ size_t n_subbufs; /* number of sub-buffers per buffer */ size_t alloc_size; /* total buffer size allocated */ const struct rchan_callbacks *cb; /* client callbacks */ struct kref kref; /* channel refcount */ void *private_data; /* for user-defined data */ struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */ int 
is_global; /* One global buffer ? */ struct list_head list; /* for channel list */ struct dentry *parent; /* parent dentry passed to open */ int has_base_filename; /* has a filename associated? */ char base_filename[NAME_MAX]; /* saved base filename */ }; /* * Relay channel client callbacks */ struct rchan_callbacks { /* * subbuf_start - called on buffer-switch to a new sub-buffer * @buf: the channel buffer containing the new sub-buffer * @subbuf: the start of the new sub-buffer * @prev_subbuf: the start of the previous sub-buffer * * The client should return 1 to continue logging, 0 to stop * logging. * * This callback is optional. * * NOTE: subbuf_start will also be invoked when the buffer is * created, so that the first sub-buffer can be initialized * if necessary. In this case, prev_subbuf will be NULL. * * NOTE: the client can reserve bytes at the beginning of the new * sub-buffer by calling subbuf_start_reserve() in this callback. */ int (*subbuf_start) (struct rchan_buf *buf, void *subbuf, void *prev_subbuf); /* * create_buf_file - create file to represent a relay channel buffer * @filename: the name of the file to create * @parent: the parent of the file to create * @mode: the mode of the file to create * @buf: the channel buffer * @is_global: outparam - set non-zero if the buffer should be global * * Called during relay_open(), once for each per-cpu buffer, * to allow the client to create a file to be used to * represent the corresponding channel buffer. If the file is * created outside of relay, the parent must also exist in * that filesystem. * * The callback should return the dentry of the file created * to represent the relay buffer. * * Setting the is_global outparam to a non-zero value will * cause relay_open() to create a single global buffer rather * than the default set of per-cpu buffers. * * This callback is mandatory. * * See Documentation/filesystems/relay.rst for more info. */ struct dentry *(*create_buf_file)(const char *filename, struct dentry *parent, umode_t mode, struct rchan_buf *buf, int *is_global); /* * remove_buf_file - remove file representing a relay channel buffer * @dentry: the dentry of the file to remove * * Called during relay_close(), once for each per-cpu buffer, * to allow the client to remove a file used to represent a * channel buffer. * * The callback should return 0 if successful, negative if not. * * This callback is mandatory. */ int (*remove_buf_file)(struct dentry *dentry); }; /* * CONFIG_RELAY kernel API, kernel/relay.c */ struct rchan *relay_open(const char *base_filename, struct dentry *parent, size_t subbuf_size, size_t n_subbufs, const struct rchan_callbacks *cb, void *private_data); extern void relay_close(struct rchan *chan); extern void relay_flush(struct rchan *chan); size_t relay_stats(struct rchan *chan, int flags); extern void relay_subbufs_consumed(struct rchan *chan, unsigned int cpu, size_t consumed); extern void relay_reset(struct rchan *chan); extern int relay_buf_full(struct rchan_buf *buf); extern size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length); /** * relay_write - write data into the channel * @chan: relay channel * @data: data to be written * @length: number of bytes to write * * Writes data into the current cpu's channel buffer. * * Protects the buffer by disabling interrupts. Use this * if you might be logging from interrupt context. Try * __relay_write() if you know you won't be logging from * interrupt context. 
*/ static inline void relay_write(struct rchan *chan, const void *data, size_t length) { unsigned long flags; struct rchan_buf *buf; local_irq_save(flags); buf = *this_cpu_ptr(chan->buf); if (unlikely(buf->offset + length > chan->subbuf_size)) length = relay_switch_subbuf(buf, length); memcpy(buf->data + buf->offset, data, length); buf->offset += length; local_irq_restore(flags); } /** * __relay_write - write data into the channel * @chan: relay channel * @data: data to be written * @length: number of bytes to write * * Writes data into the current cpu's channel buffer. * * Protects the buffer by disabling preemption. Use * relay_write() if you might be logging from interrupt * context. */ static inline void __relay_write(struct rchan *chan, const void *data, size_t length) { struct rchan_buf *buf; buf = *get_cpu_ptr(chan->buf); if (unlikely(buf->offset + length > buf->chan->subbuf_size)) length = relay_switch_subbuf(buf, length); memcpy(buf->data + buf->offset, data, length); buf->offset += length; put_cpu_ptr(chan->buf); } /** * relay_reserve - reserve slot in channel buffer * @chan: relay channel * @length: number of bytes to reserve * * Returns pointer to reserved slot, NULL if full. * * Reserves a slot in the current cpu's channel buffer. * Does not protect the buffer at all - caller must provide * appropriate synchronization. */ static inline void *relay_reserve(struct rchan *chan, size_t length) { void *reserved = NULL; struct rchan_buf *buf = *get_cpu_ptr(chan->buf); if (unlikely(buf->offset + length > buf->chan->subbuf_size)) { length = relay_switch_subbuf(buf, length); if (!length) goto end; } reserved = buf->data + buf->offset; buf->offset += length; end: put_cpu_ptr(chan->buf); return reserved; } /** * subbuf_start_reserve - reserve bytes at the start of a sub-buffer * @buf: relay channel buffer * @length: number of bytes to reserve * * Helper function used to reserve bytes at the beginning of * a sub-buffer in the subbuf_start() callback. */ static inline void subbuf_start_reserve(struct rchan_buf *buf, size_t length) { BUG_ON(length >= buf->chan->subbuf_size - 1); buf->offset = length; } /* * exported relay file operations, kernel/relay.c */ extern const struct file_operations relay_file_operations; #ifdef CONFIG_RELAY int relay_prepare_cpu(unsigned int cpu); #else #define relay_prepare_cpu NULL #endif #endif /* _LINUX_RELAY_H */ |
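/*
 * Not part of relay.h: a minimal client sketch of the API declared above,
 * following the pattern described in Documentation/filesystems/relay.rst.
 * The channel name, sub-buffer sizes and "demo_" identifiers are arbitrary
 * assumptions, and error handling is trimmed to the essentials.
 */
#include <linux/relay.h>
#include <linux/debugfs.h>

static struct dentry *demo_create_buf_file(const char *filename,
					   struct dentry *parent, umode_t mode,
					   struct rchan_buf *buf,
					   int *is_global)
{
	/* Expose each per-cpu buffer through debugfs via relay_file_operations. */
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static int demo_remove_buf_file(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static const struct rchan_callbacks demo_relay_cb = {
	.create_buf_file	= demo_create_buf_file,
	.remove_buf_file	= demo_remove_buf_file,
};

static struct rchan *demo_chan;

static int demo_relay_start(void)
{
	/* 8 sub-buffers of 64 KiB each, created under the debugfs root. */
	demo_chan = relay_open("demo", NULL, 64 * 1024, 8, &demo_relay_cb, NULL);
	return demo_chan ? 0 : -ENOMEM;
}

static void demo_log(const void *rec, size_t len)
{
	/* Disables interrupts around the copy; see relay_write() above. */
	relay_write(demo_chan, rec, len);
}

static void demo_relay_stop(void)
{
	relay_close(demo_chan);
}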
| 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 | /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_UDF_SB_H #define __LINUX_UDF_SB_H #include <linux/mutex.h> #include <linux/bitops.h> #include <linux/magic.h> /* * Even UDF 2.6 media should have version <= 0x250 but apparently there are * some broken filesystems with version set to 0x260. Accommodate those. */ #define UDF_MAX_READ_VERSION 0x0260 #define UDF_MAX_WRITE_VERSION 0x0201 #define UDF_FLAG_USE_EXTENDED_FE 0 #define UDF_VERS_USE_EXTENDED_FE 0x0200 #define UDF_FLAG_USE_STREAMS 1 #define UDF_VERS_USE_STREAMS 0x0200 #define UDF_FLAG_USE_SHORT_AD 2 #define UDF_FLAG_USE_AD_IN_ICB 3 #define UDF_FLAG_USE_FILE_CTIME_EA 4 #define UDF_FLAG_STRICT 5 #define UDF_FLAG_UNDELETE 6 #define UDF_FLAG_UNHIDE 7 #define UDF_FLAG_NOVRS 8 #define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */ #define UDF_FLAG_GID_FORGET 12 #define UDF_FLAG_UID_SET 13 #define UDF_FLAG_GID_SET 14 #define UDF_FLAG_SESSION_SET 15 #define UDF_FLAG_LASTBLOCK_SET 16 #define UDF_FLAG_BLOCKSIZE_SET 17 #define UDF_FLAG_INCONSISTENT 18 #define UDF_FLAG_RW_INCOMPAT 19 /* Set when we find RW incompatible * feature */ #define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001 #define UDF_PART_FLAG_UNALLOC_TABLE 0x0002 #define UDF_PART_FLAG_READ_ONLY 0x0010 #define UDF_PART_FLAG_WRITE_ONCE 0x0020 #define UDF_PART_FLAG_REWRITABLE 0x0040 #define UDF_PART_FLAG_OVERWRITABLE 0x0080 #define UDF_MAX_BLOCK_LOADED 8 #define UDF_TYPE1_MAP15 0x1511U #define UDF_VIRTUAL_MAP15 0x1512U #define UDF_VIRTUAL_MAP20 0x2012U #define UDF_SPARABLE_MAP15 0x1522U #define UDF_METADATA_MAP25 0x2511U #define UDF_INVALID_MODE ((umode_t)-1) #define MF_DUPLICATE_MD 0x01 #define MF_MIRROR_FE_LOADED 0x02 #define EFSCORRUPTED EUCLEAN struct udf_meta_data { __u32 s_meta_file_loc; __u32 s_mirror_file_loc; __u32 s_bitmap_file_loc; __u32 s_alloc_unit_size; __u16 s_align_unit_size; /* * Partition Reference Number of the associated physical / sparable * partition */ __u16 s_phys_partition_ref; int s_flags; struct inode *s_metadata_fe; struct inode *s_mirror_fe; struct inode *s_bitmap_fe; }; struct udf_sparing_data { __u16 s_packet_len; struct buffer_head *s_spar_map[4]; }; struct udf_virtual_data { __u32 s_num_entries; __u16 s_start_offset; }; struct udf_bitmap { __u32 s_extPosition; int s_nr_groups; struct buffer_head *s_block_bitmap[] __counted_by(s_nr_groups); }; struct udf_part_map { union { struct udf_bitmap *s_bitmap; struct inode *s_table; } s_uspace; __u32 s_partition_root; __u32 s_partition_len; __u16 s_partition_type; __u16 s_partition_num; union { struct udf_sparing_data s_sparing; struct udf_virtual_data s_virtual; struct udf_meta_data s_metadata; } s_type_specific; __u32 (*s_partition_func)(struct super_block *, __u32, __u16, __u32); __u16 s_volumeseqnum; __u16 s_partition_flags; }; #pragma pack() struct udf_sb_info { struct udf_part_map *s_partmaps; __u8 s_volume_ident[32]; /* Overall info */ __u16 s_partitions; __u16 s_partition; /* 
Sector headers */ __s32 s_session; __u32 s_anchor; __u32 s_last_block; struct buffer_head *s_lvid_bh; /* Default permissions */ umode_t s_umask; kgid_t s_gid; kuid_t s_uid; umode_t s_fmode; umode_t s_dmode; /* Lock protecting consistency of above permission settings */ rwlock_t s_cred_lock; /* Root Info */ struct timespec64 s_record_time; /* Fileset Info */ __u16 s_serial_number; /* highest UDF revision we have recorded to this media */ __u16 s_udfrev; /* Miscellaneous flags */ unsigned long s_flags; /* Encoding info */ struct nls_table *s_nls_map; /* VAT inode */ struct inode *s_vat_inode; struct mutex s_alloc_mutex; /* Protected by s_alloc_mutex */ unsigned int s_lvid_dirty; }; static inline struct udf_sb_info *UDF_SB(struct super_block *sb) { return sb->s_fs_info; } struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb); int udf_compute_nr_groups(struct super_block *sb, u32 partition); static inline int UDF_QUERY_FLAG(struct super_block *sb, int flag) { return test_bit(flag, &UDF_SB(sb)->s_flags); } static inline void UDF_SET_FLAG(struct super_block *sb, int flag) { set_bit(flag, &UDF_SB(sb)->s_flags); } static inline void UDF_CLEAR_FLAG(struct super_block *sb, int flag) { clear_bit(flag, &UDF_SB(sb)->s_flags); } #endif /* __LINUX_UDF_SB_H */ |
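/*
 * Not part of udf_sb.h: a short sketch of how fs/udf code typically consults
 * the per-superblock state declared above, assuming this header is already
 * included.  The helper name is hypothetical.
 */
static inline bool udf_demo_is_strict(struct super_block *sb)
{
	struct udf_sb_info *sbi = UDF_SB(sb);

	/* Mount-option bits live in sbi->s_flags and are tested atomically. */
	return UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT) && sbi->s_partitions > 0;
}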
| 1 1 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 | // SPDX-License-Identifier: GPL-2.0-or-later /* A driver for the D-Link DSB-R100 USB radio and Gemtek USB Radio 21. * The device plugs into both the USB and an analog audio input, so this thing * only deals with initialisation and frequency setting, the * audio data has to be handled by a sound driver. * * Major issue: I can't find out where the device reports the signal * strength, and indeed the windows software appearantly just looks * at the stereo indicator as well. So, scanning will only find * stereo stations. Sad, but I can't help it. * * Also, the windows program sends oodles of messages over to the * device, and I couldn't figure out their meaning. My suspicion * is that they don't have any:-) * * You might find some interesting stuff about this module at * http://unimut.fsk.uni-heidelberg.de/unimut/demi/dsbr * * Fully tested with the Keene USB FM Transmitter and the v4l2-compliance tool. * * Copyright (c) 2000 Markus Demleitner <msdemlei@cl.uni-heidelberg.de> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/videodev2.h> #include <linux/usb.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> /* * Version Information */ MODULE_AUTHOR("Markus Demleitner <msdemlei@tucana.harvard.edu>"); MODULE_DESCRIPTION("D-Link DSB-R100 USB FM radio driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.1.0"); #define DSB100_VENDOR 0x04b4 #define DSB100_PRODUCT 0x1002 /* Commands the device appears to understand */ #define DSB100_TUNE 1 #define DSB100_ONOFF 2 #define TB_LEN 16 /* Frequency limits in MHz -- these are European values. For Japanese devices, that would be 76 and 91. 
*/ #define FREQ_MIN 87.5 #define FREQ_MAX 108.0 #define FREQ_MUL 16000 #define v4l2_dev_to_radio(d) container_of(d, struct dsbr100_device, v4l2_dev) static int radio_nr = -1; module_param(radio_nr, int, 0); /* Data for one (physical) device */ struct dsbr100_device { struct usb_device *usbdev; struct video_device videodev; struct v4l2_device v4l2_dev; struct v4l2_ctrl_handler hdl; u8 *transfer_buffer; struct mutex v4l2_lock; int curfreq; bool stereo; bool muted; }; /* Low-level device interface begins here */ /* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */ static int dsbr100_setfreq(struct dsbr100_device *radio, unsigned freq) { unsigned f = (freq / 16 * 80) / 1000 + 856; int retval = 0; if (!radio->muted) { retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), DSB100_TUNE, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, (f >> 8) & 0x00ff, f & 0xff, radio->transfer_buffer, 8, 300); if (retval >= 0) mdelay(1); } if (retval >= 0) { radio->curfreq = freq; return 0; } dev_err(&radio->usbdev->dev, "%s - usb_control_msg returned %i, request %i\n", __func__, retval, DSB100_TUNE); return retval; } /* switch on radio */ static int dsbr100_start(struct dsbr100_device *radio) { int retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), DSB100_ONOFF, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x01, 0x00, radio->transfer_buffer, 8, 300); if (retval >= 0) return dsbr100_setfreq(radio, radio->curfreq); dev_err(&radio->usbdev->dev, "%s - usb_control_msg returned %i, request %i\n", __func__, retval, DSB100_ONOFF); return retval; } /* switch off radio */ static int dsbr100_stop(struct dsbr100_device *radio) { int retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), DSB100_ONOFF, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x00, 0x00, radio->transfer_buffer, 8, 300); if (retval >= 0) return 0; dev_err(&radio->usbdev->dev, "%s - usb_control_msg returned %i, request %i\n", __func__, retval, DSB100_ONOFF); return retval; } /* return the device status. This is, in effect, just whether it sees a stereo signal or not. Pity. */ static void dsbr100_getstat(struct dsbr100_device *radio) { int retval = usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), USB_REQ_GET_STATUS, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x00, 0x24, radio->transfer_buffer, 8, 300); if (retval < 0) { radio->stereo = false; dev_err(&radio->usbdev->dev, "%s - usb_control_msg returned %i, request %i\n", __func__, retval, USB_REQ_GET_STATUS); } else { radio->stereo = !(radio->transfer_buffer[0] & 0x01); } } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { struct dsbr100_device *radio = video_drvdata(file); strscpy(v->driver, "dsbr100", sizeof(v->driver)); strscpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card)); usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); return 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct dsbr100_device *radio = video_drvdata(file); if (v->index > 0) return -EINVAL; dsbr100_getstat(radio); strscpy(v->name, "FM", sizeof(v->name)); v->type = V4L2_TUNER_RADIO; v->rangelow = FREQ_MIN * FREQ_MUL; v->rangehigh = FREQ_MAX * FREQ_MUL; v->rxsubchans = radio->stereo ? V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO; v->capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; v->audmode = V4L2_TUNER_MODE_STEREO; v->signal = radio->stereo ? 
0xffff : 0; /* We can't get the signal strength */ return 0; } static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *v) { return v->index ? -EINVAL : 0; } static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct dsbr100_device *radio = video_drvdata(file); if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; return dsbr100_setfreq(radio, clamp_t(unsigned, f->frequency, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL)); } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct dsbr100_device *radio = video_drvdata(file); if (f->tuner) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = radio->curfreq; return 0; } static int usb_dsbr100_s_ctrl(struct v4l2_ctrl *ctrl) { struct dsbr100_device *radio = container_of(ctrl->handler, struct dsbr100_device, hdl); switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: radio->muted = ctrl->val; return radio->muted ? dsbr100_stop(radio) : dsbr100_start(radio); } return -EINVAL; } /* USB subsystem interface begins here */ /* * Handle unplugging of the device. * We call video_unregister_device in any case. * The last function called in this procedure is * usb_dsbr100_video_device_release */ static void usb_dsbr100_disconnect(struct usb_interface *intf) { struct dsbr100_device *radio = usb_get_intfdata(intf); mutex_lock(&radio->v4l2_lock); /* * Disconnect is also called on unload, and in that case we need to * mute the device. This call will silently fail if it is called * after a physical disconnect. */ usb_control_msg(radio->usbdev, usb_rcvctrlpipe(radio->usbdev, 0), DSB100_ONOFF, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x00, 0x00, radio->transfer_buffer, 8, 300); usb_set_intfdata(intf, NULL); video_unregister_device(&radio->videodev); v4l2_device_disconnect(&radio->v4l2_dev); mutex_unlock(&radio->v4l2_lock); v4l2_device_put(&radio->v4l2_dev); } /* Suspend device - stop device. */ static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message) { struct dsbr100_device *radio = usb_get_intfdata(intf); mutex_lock(&radio->v4l2_lock); if (!radio->muted && dsbr100_stop(radio) < 0) dev_warn(&intf->dev, "dsbr100_stop failed\n"); mutex_unlock(&radio->v4l2_lock); dev_info(&intf->dev, "going into suspend..\n"); return 0; } /* Resume device - start device. 
*/ static int usb_dsbr100_resume(struct usb_interface *intf) { struct dsbr100_device *radio = usb_get_intfdata(intf); mutex_lock(&radio->v4l2_lock); if (!radio->muted && dsbr100_start(radio) < 0) dev_warn(&intf->dev, "dsbr100_start failed\n"); mutex_unlock(&radio->v4l2_lock); dev_info(&intf->dev, "coming out of suspend..\n"); return 0; } /* free data structures */ static void usb_dsbr100_release(struct v4l2_device *v4l2_dev) { struct dsbr100_device *radio = v4l2_dev_to_radio(v4l2_dev); v4l2_ctrl_handler_free(&radio->hdl); v4l2_device_unregister(&radio->v4l2_dev); kfree(radio->transfer_buffer); kfree(radio); } static const struct v4l2_ctrl_ops usb_dsbr100_ctrl_ops = { .s_ctrl = usb_dsbr100_s_ctrl, }; /* File system interface */ static const struct v4l2_file_operations usb_dsbr100_fops = { .owner = THIS_MODULE, .unlocked_ioctl = video_ioctl2, .open = v4l2_fh_open, .release = v4l2_fh_release, .poll = v4l2_ctrl_poll, }; static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_log_status = v4l2_ctrl_log_status, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; /* check if the device is present and register with v4l and usb if it is */ static int usb_dsbr100_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct dsbr100_device *radio; struct v4l2_device *v4l2_dev; int retval; radio = kzalloc(sizeof(struct dsbr100_device), GFP_KERNEL); if (!radio) return -ENOMEM; radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL); if (!(radio->transfer_buffer)) { kfree(radio); return -ENOMEM; } v4l2_dev = &radio->v4l2_dev; v4l2_dev->release = usb_dsbr100_release; retval = v4l2_device_register(&intf->dev, v4l2_dev); if (retval < 0) { v4l2_err(v4l2_dev, "couldn't register v4l2_device\n"); goto err_reg_dev; } v4l2_ctrl_handler_init(&radio->hdl, 1); v4l2_ctrl_new_std(&radio->hdl, &usb_dsbr100_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 1); if (radio->hdl.error) { retval = radio->hdl.error; v4l2_err(v4l2_dev, "couldn't register control\n"); goto err_reg_ctrl; } mutex_init(&radio->v4l2_lock); strscpy(radio->videodev.name, v4l2_dev->name, sizeof(radio->videodev.name)); radio->videodev.v4l2_dev = v4l2_dev; radio->videodev.fops = &usb_dsbr100_fops; radio->videodev.ioctl_ops = &usb_dsbr100_ioctl_ops; radio->videodev.release = video_device_release_empty; radio->videodev.lock = &radio->v4l2_lock; radio->videodev.ctrl_handler = &radio->hdl; radio->videodev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER; radio->usbdev = interface_to_usbdev(intf); radio->curfreq = FREQ_MIN * FREQ_MUL; radio->muted = true; video_set_drvdata(&radio->videodev, radio); usb_set_intfdata(intf, radio); retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr); if (retval == 0) return 0; v4l2_err(v4l2_dev, "couldn't register video device\n"); err_reg_ctrl: v4l2_ctrl_handler_free(&radio->hdl); v4l2_device_unregister(v4l2_dev); err_reg_dev: kfree(radio->transfer_buffer); kfree(radio); return retval; } static const struct usb_device_id usb_dsbr100_device_table[] = { { USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_dsbr100_device_table); /* USB subsystem interface */ static struct usb_driver usb_dsbr100_driver = { .name = "dsbr100", .probe = usb_dsbr100_probe, .disconnect = usb_dsbr100_disconnect, .id_table = 
usb_dsbr100_device_table, .suspend = usb_dsbr100_suspend, .resume = usb_dsbr100_resume, .reset_resume = usb_dsbr100_resume, }; module_usb_driver(usb_dsbr100_driver); |
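/*
 * Illustrative userspace sketch, not part of dsbr100.c: tuning the radio
 * through the V4L2 ioctls handled above.  Because the tuner reports
 * V4L2_TUNER_CAP_LOW, the frequency unit is 1/16 kHz, i.e. FREQ_MUL (16000)
 * steps per MHz.  The device node name is an assumption.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_frequency f;
	int fd = open("/dev/radio0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&f, 0, sizeof(f));
	f.tuner = 0;
	f.type = V4L2_TUNER_RADIO;
	f.frequency = (unsigned int)(100.0 * 16000);	/* 100.0 MHz */

	if (ioctl(fd, VIDIOC_S_FREQUENCY, &f) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}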
| 2 2 2 1 1 1 1 1 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 | /* Upcall routine, designed to work as a key type and working through * /sbin/request-key to contact userspace when handling DNS queries. * * See Documentation/networking/dns_resolver.rst * * Copyright (c) 2007 Igor Mammedov * Author(s): Igor Mammedov (niallain@gmail.com) * Steve French (sfrench@us.ibm.com) * Wang Lei (wang840925@gmail.com) * David Howells (dhowells@redhat.com) * * The upcall wrapper used to make an arbitrary DNS query. * * This function requires the appropriate userspace tool dns.upcall to be * installed and something like the following lines should be added to the * /etc/request-key.conf file: * * create dns_resolver * * /sbin/dns.upcall %k * * For example to use this module to query AFSDB RR: * * create dns_resolver afsdb:* * /sbin/dns.afsdb %k * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/cred.h> #include <linux/dns_resolver.h> #include <linux/err.h> #include <net/net_namespace.h> #include <keys/dns_resolver-type.h> #include <keys/user-type.h> #include "internal.h" /** * dns_query - Query the DNS * @net: The network namespace to operate in. * @type: Query type (or NULL for straight host->IP lookup) * @name: Name to look up * @namelen: Length of name * @options: Request options (or NULL if no options) * @_result: Where to place the returned data (or NULL) * @_expiry: Where to store the result expiry time (or NULL) * @invalidate: Always invalidate the key after use * * The data will be returned in the pointer at *result, if provided, and the * caller is responsible for freeing it. * * The description should be of the form "[<query_type>:]<domain_name>", and * the options need to be appropriate for the query type requested. If no * query_type is given, then the query is a straight hostname to IP address * lookup. * * The DNS resolution lookup is performed by upcalling to userspace by way of * requesting a key of type dns_resolver. * * Returns the size of the result on success, -ve error code otherwise. 
*/ int dns_query(struct net *net, const char *type, const char *name, size_t namelen, const char *options, char **_result, time64_t *_expiry, bool invalidate) { struct key *rkey; struct user_key_payload *upayload; const struct cred *saved_cred; size_t typelen, desclen; char *desc, *cp; int ret, len; kenter("%s,%*.*s,%zu,%s", type, (int)namelen, (int)namelen, name, namelen, options); if (!name || namelen == 0) return -EINVAL; /* construct the query key description as "[<type>:]<name>" */ typelen = 0; desclen = 0; if (type) { typelen = strlen(type); if (typelen < 1) return -EINVAL; desclen += typelen + 1; } if (namelen < 3 || namelen > 255) return -EINVAL; desclen += namelen + 1; desc = kmalloc(desclen, GFP_KERNEL); if (!desc) return -ENOMEM; cp = desc; if (type) { memcpy(cp, type, typelen); cp += typelen; *cp++ = ':'; } memcpy(cp, name, namelen); cp += namelen; *cp = '\0'; if (!options) options = ""; kdebug("call request_key(,%s,%s)", desc, options); /* make the upcall, using special credentials to prevent the use of * add_key() to preinstall malicious redirections */ saved_cred = override_creds(dns_resolver_cache); rkey = request_key_net(&key_type_dns_resolver, desc, net, options); revert_creds(saved_cred); kfree(desc); if (IS_ERR(rkey)) { ret = PTR_ERR(rkey); goto out; } down_read(&rkey->sem); set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags); rkey->perm |= KEY_USR_VIEW; ret = key_validate(rkey); if (ret < 0) goto put; /* If the DNS server gave an error, return that to the caller */ ret = PTR_ERR(rkey->payload.data[dns_key_error]); if (ret) goto put; upayload = user_key_payload_locked(rkey); len = upayload->datalen; if (_result) { ret = -ENOMEM; *_result = kmemdup_nul(upayload->data, len, GFP_KERNEL); if (!*_result) goto put; } if (_expiry) *_expiry = rkey->expiry; ret = len; put: up_read(&rkey->sem); if (invalidate) key_invalidate(rkey); key_put(rkey); out: kleave(" = %d", ret); return ret; } EXPORT_SYMBOL(dns_query); |
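/*
 * Not part of dns_query.c: a sketch of a typical in-kernel caller, modelled
 * on how network filesystems resolve a host name through this helper.  The
 * wrapper name is hypothetical.
 */
#include <linux/dns_resolver.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_resolve_host(struct net *net, const char *hostname)
{
	char *addrs = NULL;
	time64_t expiry;
	int len;

	/* NULL type and options: plain host -> IP lookup via the upcall. */
	len = dns_query(net, NULL, hostname, strlen(hostname), NULL,
			&addrs, &expiry, false);
	if (len < 0)
		return len;

	pr_debug("resolved %s to %*.*s (expires %lld)\n",
		 hostname, len, len, addrs, (long long)expiry);
	kfree(addrs);
	return 0;
}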
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Xbox gamepad driver
 *
 * Copyright (c) 2002 Marko Friedemann <mfr@bmx-chemnitz.de>
 *               2004 Oliver Schwartz <Oliver.Schwartz@gmx.de>,
 *                    Steven Toth <steve@toth.demon.co.uk>,
 *                    Franz Lehner <franz@caos.at>,
 *                    Ivan Hawkes <blackhawk@ivanhawkes.com>
 *               2005 Dominic Cerquetti <binary1230@yahoo.com>
 *               2006 Adam Buchbinder <adam.buchbinder@gmail.com>
 *               2007 Jan Kratochvil <honza@jikos.cz>
 *               2010 Christoph Fritz <chf.fritz@googlemail.com>
 *
 * This driver is based on:
 *  - information from http://euc.jp/periphs/xbox-controller.ja.html
 *  - the iForce driver drivers/char/joystick/iforce.c
 *  - the skeleton-driver drivers/usb/usb-skeleton.c
 *  - Xbox 360 information http://www.free60.org/wiki/Gamepad
 *  - Xbox One information https://github.com/quantus/xbox-one-controller-protocol
 *
 * Thanks to:
 *  - ITO Takayuki for providing essential xpad information on his website
 *  - Vojtech Pavlik - iforce driver / input subsystem
 *  - Greg Kroah-Hartman - usb-skeleton driver
 *  - Xbox Linux project - extra USB IDs
 *  - Pekka Pöyry (quantus) - Xbox One controller reverse-engineering
 *
 * TODO:
 *  - fine tune axes (especially trigger axes)
 *  - fix "analog" buttons (reported as digital now)
 *  - get rumble working
 *  - need USB IDs for other dance pads
 *
 * History:
 *
 * 2002-06-27 - 0.0.1 : first version, just said "XBOX HID controller"
 *
 * 2002-07-02 - 0.0.2 : basic working version
 *  - all axes and 9 of the 10 buttons work (german InterAct device)
 *  - the black button does not work
 *
 * 2002-07-14 - 0.0.3 : rework by Vojtech Pavlik
 *  - indentation fixes
 *  - usb + input init sequence fixes
 *
 * 2002-07-16 - 0.0.4 : minor changes, merge with Vojtech's v0.0.3
 *  - verified the lack of HID and report descriptors
 *  - verified that ALL buttons WORK
 *  - fixed d-pad to axes mapping
 *
 * 2002-07-17 - 0.0.5 : simplified d-pad handling
 *
 * 2004-10-02 - 0.0.6 : DDR pad support
 *  - borrowed from the Xbox Linux kernel
 *  - USB IDs for commonly used dance pads are present
 *  - dance pads will map D-PAD to buttons, not axes
 *  - pass the module parameter 'dpad_to_buttons' to force
 *    the D-PAD to map to buttons if your pad is not detected
 *
 * Later changes can be tracked in SCM.
*/ #include <linux/bits.h> #include <linux/kernel.h> #include <linux/input.h> #include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/module.h> #include <linux/usb/input.h> #include <linux/usb/quirks.h> #define XPAD_PKT_LEN 64 /* * xbox d-pads should map to buttons, as is required for DDR pads * but we map them to axes when possible to simplify things */ #define MAP_DPAD_TO_BUTTONS BIT(0) #define MAP_TRIGGERS_TO_BUTTONS BIT(1) #define MAP_STICKS_TO_NULL BIT(2) #define MAP_SHARE_BUTTON BIT(3) #define MAP_PADDLES BIT(4) #define MAP_PROFILE_BUTTON BIT(5) #define MAP_SHARE_OFFSET BIT(6) #define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \ MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL) #define XTYPE_XBOX 0 #define XTYPE_XBOX360 1 #define XTYPE_XBOX360W 2 #define XTYPE_XBOXONE 3 #define XTYPE_UNKNOWN 4 /* Send power-off packet to xpad360w after holding the mode button for this many * seconds */ #define XPAD360W_POWEROFF_TIMEOUT 5 #define PKT_XB 0 #define PKT_XBE1 1 #define PKT_XBE2_FW_OLD 2 #define PKT_XBE2_FW_5_EARLY 3 #define PKT_XBE2_FW_5_11 4 #define FLAG_DELAY_INIT BIT(0) static bool dpad_to_buttons; module_param(dpad_to_buttons, bool, S_IRUGO); MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads"); static bool triggers_to_buttons; module_param(triggers_to_buttons, bool, S_IRUGO); MODULE_PARM_DESC(triggers_to_buttons, "Map triggers to buttons rather than axes for unknown pads"); static bool sticks_to_null; module_param(sticks_to_null, bool, S_IRUGO); MODULE_PARM_DESC(sticks_to_null, "Do not map sticks at all for unknown pads"); static bool auto_poweroff = true; module_param(auto_poweroff, bool, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(auto_poweroff, "Power off wireless controllers on suspend"); static const struct xpad_device { u16 idVendor; u16 idProduct; char *name; u8 mapping; u8 xtype; u8 flags; } xpad_device[] = { /* Please keep this list sorted by vendor and product ID. */ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 }, { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 }, { 0x03f0, 0x038D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wired */ { 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */ { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, { 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE }, { 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, /* v2 */ { 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE }, { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX }, { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, { 0x044f, 0xd01e, "ThrustMaster, Inc. 
ESWAP X 2 ELDEN RING EDITION", 0, XTYPE_XBOXONE }, { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX }, { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX }, { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX }, { 0x045e, 0x0288, "Microsoft Xbox Controller S v2", 0, XTYPE_XBOX }, { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX }, { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, { 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 }, { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x02a9, "Xbox 360 Wireless Receiver (Unofficial)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE }, { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE }, { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE }, { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE }, { 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE }, { 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE }, { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 }, { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 }, { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 }, { 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 }, { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX }, { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX }, { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX }, { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 }, { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX360 }, { 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 }, { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX }, { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX }, { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX }, { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX }, { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX }, { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX }, { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, { 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX }, { 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 }, { 0x0738, 0x4503, "Mad Catz Racing Wheel", 0, XTYPE_XBOXONE }, { 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX }, { 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX }, { 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX }, { 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX }, { 0x0738, 0x4530, "Mad Catz Universal MC2 Racing Wheel and Pedals", 0, XTYPE_XBOX }, { 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX }, { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4586, "Mad Catz MicroCon Wireless Controller", 0, XTYPE_XBOX }, { 0x0738, 0x4588, "Mad Catz Blaster", 0, XTYPE_XBOX }, { 0x0738, 0x45ff, "Mad Catz Beat Pad (w/ Handle)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, 
XTYPE_XBOX360 }, { 0x0738, 0x4718, "Mad Catz Street Fighter IV FightStick SE", 0, XTYPE_XBOX360 }, { 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x4736, "Mad Catz MicroCon Gamepad", 0, XTYPE_XBOX360 }, { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, { 0x0738, 0x4743, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x4758, "Mad Catz Arcade Game Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 }, { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 }, { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 }, { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 }, { 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE }, { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX }, { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX }, { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX }, { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX }, { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX }, { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX }, { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX }, { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x0db0, 0x1901, "Micro Star International Xbox360 Controller for Windows", 0, XTYPE_XBOX360 }, { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX }, { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX }, { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX }, { 0x0e4c, 0x3510, "Radica Gamester", 0, XTYPE_XBOX }, { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0008, "After Glow Pro Controller", 0, XTYPE_XBOX }, { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x011f, "Rock Candy Gamepad Wired Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0131, "PDP EA Sports Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0133, "Xbox 360 Wired Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x013a, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 
0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0164, "PDP Battlefield One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0165, "PDP Titanfall 2", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0413, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0501, "PDP Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0xf900, "PDP Afterglow AX.1", 0, XTYPE_XBOX360 }, { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX }, { 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX }, { 0x0f0d, 0x000a, "Hori Co. 
DOA4 FightStick", 0, XTYPE_XBOX360 }, { 0x0f0d, 0x000c, "Hori PadEX Turbo", 0, XTYPE_XBOX360 }, { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x001b, "Hori Real Arcade Pro VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE }, { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x00c5, "Hori Fighting Commander ONE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x0f0d, 0x00dc, "HORIPAD FPS for Nintendo Switch", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x0f0d, 0x0151, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, { 0x0f0d, 0x0152, "Hori Racing Wheel Overdrive for Xbox Series X", 0, XTYPE_XBOXONE }, { 0x0f0d, 0x01b2, "HORI Taiko No Tatsujin Drum Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, { 0x10f5, 0x7005, "Turtle Beach Recon Controller", 0, XTYPE_XBOXONE }, { 0x10f5, 0x7008, "Turtle Beach Recon Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x10f5, 0x7073, "Turtle Beach Stealth Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 }, { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 }, { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, { 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE }, { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 }, { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x17ef, 0x6182, "Lenovo Legion Controller for Windows", 0, XTYPE_XBOX360 }, { 0x1949, 0x041a, "Amazon Game Controller", 0, XTYPE_XBOX360 }, { 0x1a86, 0xe310, "Legion Go S", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, { 0x1bad, 0x0003, 
"Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf018, "Mad Catz Street Fighter IV SE Fighting Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf019, "Mad Catz Brawlstick for Xbox 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf021, "Mad Cats Ghost Recon FS GamePad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf025, "Mad Catz Call Of Duty", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf027, "Mad Catz FPS Pro", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf02e, "Mad Catz Fightpad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf030, "Mad Catz Xbox 360 MC2 MicroCon Racing Wheel", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf036, "Mad Catz MicroCon GamePad Pro", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf039, "Mad Catz MvC2 TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf03a, "Mad Catz SFxT Fightstick Pro", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf03d, "Street Fighter IV Arcade Stick TE - Chun Li", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf03e, "Mad Catz MLG FightStick TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf03f, "Mad Catz FightStick SoulCaliber", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf042, "Mad Catz FightStick TES+", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf080, "Mad Catz FightStick TE2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf501, "HoriPad EX2 Turbo", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf502, "Hori Real Arcade Pro.VX SA", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf503, "Hori Fighting Stick VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf504, "Hori Real Arcade Pro. 
EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf505, "Hori Fighting Stick EX2B", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xf506, "Hori Real Arcade Pro.EX Premium VLX", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf904, "PDP Versus Fighting Pad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xf906, "MortalKombat FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, { 0x1ee9, 0x1590, "ZOTAC Gaming Zone", 0, XTYPE_XBOX360 }, { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE }, { 0x20d6, 0x2064, "PowerA Wired Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, { 0x20d6, 0x400b, "PowerA FUSION Pro 4 Wired Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x20d6, 0x890b, "PowerA MOGA XP-Ultra Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2345, 0xe00b, "Machenike G5 Pro Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x530a, "Xbox 360 Pro EX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x531a, "PowerA Pro Ex", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5397, "FUS1ON Tournament Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE }, { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, { 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5510, "Hori Fighting Commander ONE (Xbox 360/PC Mode)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x581a, "ThrustMaster XB1 Classic Controller", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5b02, "Thrustmaster, Inc. 
GPX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x2563, 0x058d, "OneXPlayer Gamepad", 0, XTYPE_XBOX360 }, { 0x294b, 0x3303, "Snakebyte GAMEPAD BASE X", 0, XTYPE_XBOXONE }, { 0x294b, 0x3404, "Snakebyte GAMEPAD RGB X", 0, XTYPE_XBOXONE }, { 0x2993, 0x2001, "TECNO Pocket Go", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x2000, "8BitDo Pro 2 Wired Controller fox Xbox", 0, XTYPE_XBOXONE }, { 0x2dc8, 0x200f, "8BitDo Ultimate 3-mode Controller for Xbox", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 }, { 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 }, { 0x2e24, 0x0423, "Hyperkin DuchesS Xbox One pad", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, { 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE }, { 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, { 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 }, { 0x3285, 0x0603, "Nacon Pro Compact controller for Xbox", 0, XTYPE_XBOXONE }, { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 }, { 0x3285, 0x0614, "Nacon Pro Compact", 0, XTYPE_XBOXONE }, { 0x3285, 0x0646, "Nacon Pro Compact", 0, XTYPE_XBOXONE }, { 0x3285, 0x0662, "Nacon Revolution5 Pro", 0, XTYPE_XBOX360 }, { 0x3285, 0x0663, "Nacon Evol-X", 0, XTYPE_XBOXONE }, { 0x3537, 0x1004, "GameSir T4 Kaleid", 0, XTYPE_XBOX360 }, { 0x3537, 0x1010, "GameSir G7 SE", 0, XTYPE_XBOXONE }, { 0x366c, 0x0005, "ByoWave Proteus Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE, FLAG_DELAY_INIT }, { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, { 0x413d, 0x2104, "Black Shark Green Ghost Gamepad", 0, XTYPE_XBOX360 }, { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN } }; /* buttons shared with xbox and xbox360 */ static const signed short xpad_common_btn[] = { BTN_A, BTN_B, BTN_X, BTN_Y, /* "analog" buttons */ BTN_START, BTN_SELECT, BTN_THUMBL, BTN_THUMBR, /* start/back/sticks */ -1 /* terminating entry */ }; /* original xbox controllers only */ static const signed short xpad_btn[] = { BTN_C, BTN_Z, /* "analog" buttons */ -1 /* terminating entry */ }; /* used when dpad is mapped to buttons */ static const signed short xpad_btn_pad[] = { BTN_DPAD_LEFT, BTN_DPAD_RIGHT, /* d-pad left, right */ BTN_DPAD_UP, BTN_DPAD_DOWN, /* d-pad up, down */ -1 /* terminating entry */ }; /* used when triggers are mapped to buttons */ static const signed short xpad_btn_triggers[] = { BTN_TL2, BTN_TR2, /* triggers left/right */ -1 }; static const signed short xpad360_btn[] = { /* buttons for x360 controller */ BTN_TL, BTN_TR, /* Button LB/RB */ BTN_MODE, /* The big X button */ -1 }; static const signed short 
xpad_abs[] = { ABS_X, ABS_Y, /* left stick */ ABS_RX, ABS_RY, /* right stick */ -1 /* terminating entry */ }; /* used when dpad is mapped to axes */ static const signed short xpad_abs_pad[] = { ABS_HAT0X, ABS_HAT0Y, /* d-pad axes */ -1 /* terminating entry */ }; /* used when triggers are mapped to axes */ static const signed short xpad_abs_triggers[] = { ABS_Z, ABS_RZ, /* triggers left/right */ -1 }; /* used when the controller has extra paddle buttons */ static const signed short xpad_btn_paddles[] = { BTN_GRIPR, BTN_GRIPR2, /* paddle upper right, lower right */ BTN_GRIPL, BTN_GRIPL2, /* paddle upper left, lower left */ -1 /* terminating entry */ }; /* * Xbox 360 has a vendor-specific class, so we cannot match it with only * USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we * match against vendor id as well. Wired Xbox 360 devices have protocol 1, * wireless controllers have protocol 129. */ #define XPAD_XBOX360_VENDOR_PROTOCOL(vend, pr) \ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \ .idVendor = (vend), \ .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \ .bInterfaceSubClass = 93, \ .bInterfaceProtocol = (pr) #define XPAD_XBOX360_VENDOR(vend) \ { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 1) }, \ { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 129) } /* The Xbox One controller uses subclass 71 and protocol 208. */ #define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \ .idVendor = (vend), \ .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \ .bInterfaceSubClass = 71, \ .bInterfaceProtocol = (pr) #define XPAD_XBOXONE_VENDOR(vend) \ { XPAD_XBOXONE_VENDOR_PROTOCOL((vend), 208) } static const struct usb_device_id xpad_table[] = { /* * Please keep this list sorted by vendor ID. Note that there are 2 * macros - XPAD_XBOX360_VENDOR and XPAD_XBOXONE_VENDOR. */ { USB_INTERFACE_INFO('X', 'B', 0) }, /* Xbox USB-IF not-approved class */ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 controller */ XPAD_XBOX360_VENDOR(0x03eb), /* Wooting Keyboards (Legacy) */ XPAD_XBOX360_VENDOR(0x03f0), /* HP HyperX Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x03f0), /* HP HyperX Xbox One controllers */ XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x044f), /* Thrustmaster Xbox One controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */ XPAD_XBOX360_VENDOR(0x0502), /* Acer Inc. 
Xbox 360 style controllers */ XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */ XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz Xbox 360 controllers */ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz Gamepad */ XPAD_XBOXONE_VENDOR(0x0b05), /* ASUS controllers */ XPAD_XBOX360_VENDOR(0x0c12), /* Zeroplus X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x0db0), /* Micro Star International X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f Xbox One controllers */ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori controllers */ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori controllers */ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries controllers */ XPAD_XBOXONE_VENDOR(0x10f5), /* Turtle Beach Controllers */ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ XPAD_XBOX360_VENDOR(0x11ff), /* PXN V900 */ XPAD_XBOX360_VENDOR(0x1209), /* Ardwiino Controllers */ XPAD_XBOX360_VENDOR(0x12ab), /* Xbox 360 dance pads */ XPAD_XBOX360_VENDOR(0x1430), /* RedOctane Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x146b), /* Bigben Interactive controllers */ XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */ XPAD_XBOX360_VENDOR(0x15e4), /* Numark Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x162e), /* Joytech Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ XPAD_XBOX360_VENDOR(0x17ef), /* Lenovo */ XPAD_XBOX360_VENDOR(0x1949), /* Amazon controllers */ XPAD_XBOX360_VENDOR(0x1a86), /* Nanjing Qinheng Microelectronics (WCH) */ XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band guitar and drums */ XPAD_XBOX360_VENDOR(0x1ee9), /* ZOTAC Technology Limited */ XPAD_XBOX360_VENDOR(0x20d6), /* PowerA controllers */ XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA controllers */ XPAD_XBOX360_VENDOR(0x2345), /* Machenike Controllers */ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA controllers */ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA controllers */ XPAD_XBOX360_VENDOR(0x2563), /* OneXPlayer Gamepad */ XPAD_XBOX360_VENDOR(0x260d), /* Dareu H101 */ XPAD_XBOXONE_VENDOR(0x294b), /* Snakebyte */ XPAD_XBOX360_VENDOR(0x2993), /* TECNO Mobile */ XPAD_XBOX360_VENDOR(0x2c22), /* Qanba Controllers */ XPAD_XBOX360_VENDOR(0x2dc8), /* 8BitDo Controllers */ XPAD_XBOXONE_VENDOR(0x2dc8), /* 8BitDo Controllers */ XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Controllers */ XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ XPAD_XBOXONE_VENDOR(0x2e95), /* SCUF Gaming Controller */ XPAD_XBOX360_VENDOR(0x31e3), /* Wooting Keyboards */ XPAD_XBOX360_VENDOR(0x3285), /* Nacon GC-100 */ XPAD_XBOXONE_VENDOR(0x3285), /* Nacon Evol-X */ XPAD_XBOX360_VENDOR(0x3537), /* GameSir Controllers */ XPAD_XBOXONE_VENDOR(0x3537), /* GameSir Controllers */ XPAD_XBOXONE_VENDOR(0x366c), /* ByoWave controllers */ XPAD_XBOX360_VENDOR(0x413d), /* Black Shark Green Ghost Controller */ { } }; MODULE_DEVICE_TABLE(usb, xpad_table); struct xboxone_init_packet { u16 idVendor; u16 idProduct; const u8 *data; u8 len; }; #define XBOXONE_INIT_PKT(_vid, _pid, _data) \ { \ .idVendor = (_vid), \ .idProduct = (_pid), \ .data = (_data), \ .len = ARRAY_SIZE(_data), \ } /* * starting with xbox one, the game input protocol is used * magic numbers are taken from * - https://github.com/xpadneo/gip-dissector/blob/main/src/gip-dissector.lua * - https://github.com/medusalix/xone/blob/master/bus/protocol.c */ #define GIP_CMD_ACK 0x01 #define 
GIP_CMD_ANNOUNCE 0x02 #define GIP_CMD_IDENTIFY 0x04 #define GIP_CMD_POWER 0x05 #define GIP_CMD_AUTHENTICATE 0x06 #define GIP_CMD_VIRTUAL_KEY 0x07 #define GIP_CMD_RUMBLE 0x09 #define GIP_CMD_LED 0x0a #define GIP_CMD_FIRMWARE 0x0c #define GIP_CMD_INPUT 0x20 #define GIP_SEQ0 0x00 #define GIP_OPT_ACK 0x10 #define GIP_OPT_INTERNAL 0x20 /* * length of the command payload encoded with * https://en.wikipedia.org/wiki/LEB128 * which is a no-op for N < 128 */ #define GIP_PL_LEN(N) (N) /* * payload specific defines */ #define GIP_PWR_ON 0x00 #define GIP_LED_ON 0x01 #define GIP_MOTOR_R BIT(0) #define GIP_MOTOR_L BIT(1) #define GIP_MOTOR_RT BIT(2) #define GIP_MOTOR_LT BIT(3) #define GIP_MOTOR_ALL (GIP_MOTOR_R | GIP_MOTOR_L | GIP_MOTOR_RT | GIP_MOTOR_LT) #define GIP_WIRED_INTF_DATA 0 #define GIP_WIRED_INTF_AUDIO 1 /* * This packet is required for all Xbox One pads with 2015 * or later firmware installed (or present from the factory). */ static const u8 xboxone_power_on[] = { GIP_CMD_POWER, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(1), GIP_PWR_ON }; /* * This packet is required for Xbox One S (0x045e:0x02ea) * and Xbox One Elite Series 2 (0x045e:0x0b00) pads to * initialize the controller that was previously used in * Bluetooth mode. */ static const u8 xboxone_s_init[] = { GIP_CMD_POWER, GIP_OPT_INTERNAL, GIP_SEQ0, 0x0f, 0x06 }; /* * This packet is required to get additional input data * from Xbox One Elite Series 2 (0x045e:0x0b00) pads. * We mostly do this right now to get paddle data */ static const u8 extra_input_packet_init[] = { 0x4d, 0x10, 0x01, 0x02, 0x07, 0x00 }; /* * This packet is required for the Titanfall 2 Xbox One pads * (0x0e6f:0x0165) to finish initialization and for Hori pads * (0x0f0d:0x0067) to make the analog sticks work. */ static const u8 xboxone_hori_ack_id[] = { GIP_CMD_ACK, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(9), 0x00, GIP_CMD_IDENTIFY, GIP_OPT_INTERNAL, 0x3a, 0x00, 0x00, 0x00, 0x80, 0x00 }; /* * This packet is sent by default on Windows, and is required for some pads to * start sending input reports, including most (all?) of the PDP. These pads * include: (0x0e6f:0x02ab), (0x0e6f:0x02a4), (0x0e6f:0x02a6). */ static const u8 xboxone_led_on[] = { GIP_CMD_LED, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(3), 0x00, GIP_LED_ON, 0x14 }; /* * This packet is required for most (all?) of the PDP pads to start * sending input reports. These pads include: (0x0e6f:0x02ab), * (0x0e6f:0x02a4), (0x0e6f:0x02a6). */ static const u8 xboxone_auth_done[] = { GIP_CMD_AUTHENTICATE, GIP_OPT_INTERNAL, GIP_SEQ0, GIP_PL_LEN(2), 0x01, 0x00 }; /* * A specific rumble packet is required for some PowerA pads to start * sending input reports. One of those pads is (0x24c6:0x543a). */ static const u8 xboxone_rumblebegin_init[] = { GIP_CMD_RUMBLE, 0x00, GIP_SEQ0, GIP_PL_LEN(9), 0x00, GIP_MOTOR_ALL, 0x00, 0x00, 0x1D, 0x1D, 0xFF, 0x00, 0x00 }; /* * A rumble packet with zero FF intensity will immediately * terminate the rumbling required to init PowerA pads. * This should happen fast enough that the motors don't * spin up to enough speed to actually vibrate the gamepad. */ static const u8 xboxone_rumbleend_init[] = { GIP_CMD_RUMBLE, 0x00, GIP_SEQ0, GIP_PL_LEN(9), 0x00, GIP_MOTOR_ALL, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; /* * This specifies the selection of init packets that a gamepad * will be sent on init *and* the order in which they will be * sent. The correct sequence number will be added when the * packet is going to be sent. 
*/ static const struct xboxone_init_packet xboxone_init_packets[] = { XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_ack_id), XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_ack_id), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_power_on), XBOXONE_INIT_PKT(0x045e, 0x02ea, xboxone_s_init), XBOXONE_INIT_PKT(0x045e, 0x0b00, xboxone_s_init), XBOXONE_INIT_PKT(0x045e, 0x0b00, extra_input_packet_init), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_led_on), XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_auth_done), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init), XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init), XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init), }; struct xpad_output_packet { u8 data[XPAD_PKT_LEN]; u8 len; bool pending; }; #define XPAD_OUT_CMD_IDX 0 #define XPAD_OUT_FF_IDX 1 #define XPAD_OUT_LED_IDX (1 + IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF)) #define XPAD_NUM_OUT_PACKETS (1 + \ IS_ENABLED(CONFIG_JOYSTICK_XPAD_FF) + \ IS_ENABLED(CONFIG_JOYSTICK_XPAD_LEDS)) struct usb_xpad { struct input_dev *dev; /* input device interface */ struct input_dev __rcu *x360w_dev; struct usb_device *udev; /* usb device */ struct usb_interface *intf; /* usb interface */ bool pad_present; bool input_created; struct urb *irq_in; /* urb for interrupt in report */ unsigned char *idata; /* input data */ dma_addr_t idata_dma; struct urb *irq_out; /* urb for interrupt out report */ struct usb_anchor irq_out_anchor; bool irq_out_active; /* we must not use an active URB */ u8 odata_serial; /* serial number for xbox one protocol */ unsigned char *odata; /* output data */ dma_addr_t odata_dma; spinlock_t odata_lock; struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS]; int last_out_packet; int init_seq; #if defined(CONFIG_JOYSTICK_XPAD_LEDS) struct xpad_led *led; #endif char phys[64]; /* physical device path */ int mapping; /* map d-pad to buttons or to axes */ int xtype; /* type of xbox device */ int packet_type; /* type of the extended packet */ int pad_nr; /* the order x360 pads were attached */ const char *name; /* name of the device */ struct work_struct work; /* init/remove device from callback */ time64_t mode_btn_down_ts; bool delay_init; /* init packets should be delayed */ bool delayed_init_done; }; static int xpad_init_input(struct usb_xpad *xpad); static void xpad_deinit_input(struct usb_xpad *xpad); static int xpad_start_input(struct usb_xpad *xpad); static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num); static void xpad360w_poweroff_controller(struct usb_xpad *xpad); /* * xpad_process_packet * * Completes a request by converting the data into events for the * input subsystem. 
* * The used report descriptor was taken from ITO Takayuki's website: * http://euc.jp/periphs/xbox-controller.ja.html */ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data) { struct input_dev *dev = xpad->dev; if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { /* left stick */ input_report_abs(dev, ABS_X, (__s16) le16_to_cpup((__le16 *)(data + 12))); input_report_abs(dev, ABS_Y, ~(__s16) le16_to_cpup((__le16 *)(data + 14))); /* right stick */ input_report_abs(dev, ABS_RX, (__s16) le16_to_cpup((__le16 *)(data + 16))); input_report_abs(dev, ABS_RY, ~(__s16) le16_to_cpup((__le16 *)(data + 18))); } /* triggers left/right */ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { input_report_key(dev, BTN_TL2, data[10]); input_report_key(dev, BTN_TR2, data[11]); } else { input_report_abs(dev, ABS_Z, data[10]); input_report_abs(dev, ABS_RZ, data[11]); } /* digital pad */ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { /* dpad as buttons (left, right, up, down) */ input_report_key(dev, BTN_DPAD_LEFT, data[2] & BIT(2)); input_report_key(dev, BTN_DPAD_RIGHT, data[2] & BIT(3)); input_report_key(dev, BTN_DPAD_UP, data[2] & BIT(0)); input_report_key(dev, BTN_DPAD_DOWN, data[2] & BIT(1)); } else { input_report_abs(dev, ABS_HAT0X, !!(data[2] & 0x08) - !!(data[2] & 0x04)); input_report_abs(dev, ABS_HAT0Y, !!(data[2] & 0x02) - !!(data[2] & 0x01)); } /* start/back buttons and stick press left/right */ input_report_key(dev, BTN_START, data[2] & BIT(4)); input_report_key(dev, BTN_SELECT, data[2] & BIT(5)); input_report_key(dev, BTN_THUMBL, data[2] & BIT(6)); input_report_key(dev, BTN_THUMBR, data[2] & BIT(7)); /* "analog" buttons A, B, X, Y */ input_report_key(dev, BTN_A, data[4]); input_report_key(dev, BTN_B, data[5]); input_report_key(dev, BTN_X, data[6]); input_report_key(dev, BTN_Y, data[7]); /* "analog" buttons black, white */ input_report_key(dev, BTN_C, data[8]); input_report_key(dev, BTN_Z, data[9]); input_sync(dev); } /* * xpad360_process_packet * * Completes a request by converting the data into events for the * input subsystem. It is version for xbox 360 controller * * The used report descriptor was taken from: * http://www.free60.org/wiki/Gamepad */ static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev, u16 cmd, unsigned char *data) { /* valid pad data */ if (data[0] != 0x00) return; /* digital pad */ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { /* dpad as buttons (left, right, up, down) */ input_report_key(dev, BTN_DPAD_LEFT, data[2] & BIT(2)); input_report_key(dev, BTN_DPAD_RIGHT, data[2] & BIT(3)); input_report_key(dev, BTN_DPAD_UP, data[2] & BIT(0)); input_report_key(dev, BTN_DPAD_DOWN, data[2] & BIT(1)); } /* * This should be a simple else block. However historically * xbox360w has mapped DPAD to buttons while xbox360 did not. This * made no sense, but now we can not just switch back and have to * support both behaviors. 
*/ if (!(xpad->mapping & MAP_DPAD_TO_BUTTONS) || xpad->xtype == XTYPE_XBOX360W) { input_report_abs(dev, ABS_HAT0X, !!(data[2] & 0x08) - !!(data[2] & 0x04)); input_report_abs(dev, ABS_HAT0Y, !!(data[2] & 0x02) - !!(data[2] & 0x01)); } /* start/back buttons */ input_report_key(dev, BTN_START, data[2] & BIT(4)); input_report_key(dev, BTN_SELECT, data[2] & BIT(5)); /* stick press left/right */ input_report_key(dev, BTN_THUMBL, data[2] & BIT(6)); input_report_key(dev, BTN_THUMBR, data[2] & BIT(7)); /* buttons A,B,X,Y,TL,TR and MODE */ input_report_key(dev, BTN_A, data[3] & BIT(4)); input_report_key(dev, BTN_B, data[3] & BIT(5)); input_report_key(dev, BTN_X, data[3] & BIT(6)); input_report_key(dev, BTN_Y, data[3] & BIT(7)); input_report_key(dev, BTN_TL, data[3] & BIT(0)); input_report_key(dev, BTN_TR, data[3] & BIT(1)); input_report_key(dev, BTN_MODE, data[3] & BIT(2)); if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { /* left stick */ input_report_abs(dev, ABS_X, (__s16) le16_to_cpup((__le16 *)(data + 6))); input_report_abs(dev, ABS_Y, ~(__s16) le16_to_cpup((__le16 *)(data + 8))); /* right stick */ input_report_abs(dev, ABS_RX, (__s16) le16_to_cpup((__le16 *)(data + 10))); input_report_abs(dev, ABS_RY, ~(__s16) le16_to_cpup((__le16 *)(data + 12))); } /* triggers left/right */ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { input_report_key(dev, BTN_TL2, data[4]); input_report_key(dev, BTN_TR2, data[5]); } else { input_report_abs(dev, ABS_Z, data[4]); input_report_abs(dev, ABS_RZ, data[5]); } input_sync(dev); /* XBOX360W controllers can't be turned off without driver assistance */ if (xpad->xtype == XTYPE_XBOX360W) { if (xpad->mode_btn_down_ts > 0 && xpad->pad_present && ((ktime_get_seconds() - xpad->mode_btn_down_ts) >= XPAD360W_POWEROFF_TIMEOUT)) { xpad360w_poweroff_controller(xpad); xpad->mode_btn_down_ts = 0; return; } /* mode button down/up */ if (data[3] & BIT(2)) xpad->mode_btn_down_ts = ktime_get_seconds(); else xpad->mode_btn_down_ts = 0; } } static void xpad_presence_work(struct work_struct *work) { struct usb_xpad *xpad = container_of(work, struct usb_xpad, work); int error; if (xpad->pad_present) { error = xpad_init_input(xpad); if (error) { /* complain only, not much else we can do here */ dev_err(&xpad->dev->dev, "unable to init device: %d\n", error); } else { rcu_assign_pointer(xpad->x360w_dev, xpad->dev); } } else { RCU_INIT_POINTER(xpad->x360w_dev, NULL); synchronize_rcu(); /* * Now that we are sure xpad360w_process_packet is not * using input device we can get rid of it. */ xpad_deinit_input(xpad); } } /* * xpad360w_process_packet * * Completes a request by converting the data into events for the * input subsystem. It is version for xbox 360 wireless controller. * * Byte.Bit * 00.1 - Status change: The controller or headset has connected/disconnected * Bits 01.7 and 01.6 are valid * 01.7 - Controller present * 01.6 - Headset present * 01.1 - Pad state (Bytes 4+) valid * */ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data) { struct input_dev *dev; bool present; /* Presence change */ if (data[0] & 0x08) { present = (data[1] & 0x80) != 0; if (xpad->pad_present != present) { xpad->pad_present = present; schedule_work(&xpad->work); } } /* Valid pad data */ if (data[1] != 0x1) return; rcu_read_lock(); dev = rcu_dereference(xpad->x360w_dev); if (dev) xpad360_process_packet(xpad, dev, cmd, &data[4]); rcu_read_unlock(); } /* * xpadone_process_packet * * Completes a request by converting the data into events for the * input subsystem. 
This version is for the Xbox One controller. * * The report format was gleaned from * https://github.com/kylelemons/xbox/blob/master/xbox.go */ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len) { struct input_dev *dev = xpad->dev; bool do_sync = false; /* the xbox button has its own special report */ if (data[0] == GIP_CMD_VIRTUAL_KEY) { /* * The Xbox One S controller requires these reports to be * acked otherwise it continues sending them forever and * won't report further mode button events. */ if (data[1] == (GIP_OPT_ACK | GIP_OPT_INTERNAL)) xpadone_ack_mode_report(xpad, data[2]); input_report_key(dev, BTN_MODE, data[4] & GENMASK(1, 0)); input_sync(dev); do_sync = true; } else if (data[0] == GIP_CMD_FIRMWARE) { /* Some packet formats force us to use this separate to poll paddle inputs */ if (xpad->packet_type == PKT_XBE2_FW_5_11) { /* Mute paddles if controller is in a custom profile slot * Checked by looking at the active profile slot to * verify it's the default slot */ if (data[19] != 0) data[18] = 0; /* Elite Series 2 split packet paddle bits */ input_report_key(dev, BTN_GRIPR, data[18] & BIT(0)); input_report_key(dev, BTN_GRIPR2, data[18] & BIT(1)); input_report_key(dev, BTN_GRIPL, data[18] & BIT(2)); input_report_key(dev, BTN_GRIPL2, data[18] & BIT(3)); do_sync = true; } } else if (data[0] == GIP_CMD_ANNOUNCE) { int error; if (xpad->delay_init && !xpad->delayed_init_done) { xpad->delayed_init_done = true; error = xpad_start_input(xpad); if (error) dev_warn(&xpad->dev->dev, "unable to start delayed input: %d\n", error); } } else if (data[0] == GIP_CMD_INPUT) { /* The main valid packet type for inputs */ /* menu/view buttons */ input_report_key(dev, BTN_START, data[4] & BIT(2)); input_report_key(dev, BTN_SELECT, data[4] & BIT(3)); if (xpad->mapping & MAP_SHARE_BUTTON) { if (xpad->mapping & MAP_SHARE_OFFSET) input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0)); else input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0)); } /* buttons A,B,X,Y */ input_report_key(dev, BTN_A, data[4] & BIT(4)); input_report_key(dev, BTN_B, data[4] & BIT(5)); input_report_key(dev, BTN_X, data[4] & BIT(6)); input_report_key(dev, BTN_Y, data[4] & BIT(7)); /* digital pad */ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { /* dpad as buttons (left, right, up, down) */ input_report_key(dev, BTN_DPAD_LEFT, data[5] & BIT(2)); input_report_key(dev, BTN_DPAD_RIGHT, data[5] & BIT(3)); input_report_key(dev, BTN_DPAD_UP, data[5] & BIT(0)); input_report_key(dev, BTN_DPAD_DOWN, data[5] & BIT(1)); } else { input_report_abs(dev, ABS_HAT0X, !!(data[5] & 0x08) - !!(data[5] & 0x04)); input_report_abs(dev, ABS_HAT0Y, !!(data[5] & 0x02) - !!(data[5] & 0x01)); } /* TL/TR */ input_report_key(dev, BTN_TL, data[5] & BIT(4)); input_report_key(dev, BTN_TR, data[5] & BIT(5)); /* stick press left/right */ input_report_key(dev, BTN_THUMBL, data[5] & BIT(6)); input_report_key(dev, BTN_THUMBR, data[5] & BIT(7)); if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { /* left stick */ input_report_abs(dev, ABS_X, (__s16) le16_to_cpup((__le16 *)(data + 10))); input_report_abs(dev, ABS_Y, ~(__s16) le16_to_cpup((__le16 *)(data + 12))); /* right stick */ input_report_abs(dev, ABS_RX, (__s16) le16_to_cpup((__le16 *)(data + 14))); input_report_abs(dev, ABS_RY, ~(__s16) le16_to_cpup((__le16 *)(data + 16))); } /* triggers left/right */ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { input_report_key(dev, BTN_TL2, (__u16) le16_to_cpup((__le16 *)(data + 6))); input_report_key(dev, BTN_TR2, (__u16) 
le16_to_cpup((__le16 *)(data + 8))); } else { input_report_abs(dev, ABS_Z, (__u16) le16_to_cpup((__le16 *)(data + 6))); input_report_abs(dev, ABS_RZ, (__u16) le16_to_cpup((__le16 *)(data + 8))); } /* Profile button has a value of 0-3, so it is reported as an axis */ if (xpad->mapping & MAP_PROFILE_BUTTON) input_report_abs(dev, ABS_PROFILE, data[34]); /* paddle handling */ /* based on SDL's SDL_hidapi_xboxone.c */ if (xpad->mapping & MAP_PADDLES) { if (xpad->packet_type == PKT_XBE1) { /* Mute paddles if controller has a custom mapping applied. * Checked by comparing the current mapping * config against the factory mapping config */ if (memcmp(&data[4], &data[18], 2) != 0) data[32] = 0; /* OG Elite Series Controller paddle bits */ input_report_key(dev, BTN_GRIPR, data[32] & BIT(1)); input_report_key(dev, BTN_GRIPR2, data[32] & BIT(3)); input_report_key(dev, BTN_GRIPL, data[32] & BIT(0)); input_report_key(dev, BTN_GRIPL2, data[32] & BIT(2)); } else if (xpad->packet_type == PKT_XBE2_FW_OLD) { /* Mute paddles if controller has a custom mapping applied. * Checked by comparing the current mapping * config against the factory mapping config */ if (data[19] != 0) data[18] = 0; /* Elite Series 2 4.x firmware paddle bits */ input_report_key(dev, BTN_GRIPR, data[18] & BIT(0)); input_report_key(dev, BTN_GRIPR2, data[18] & BIT(1)); input_report_key(dev, BTN_GRIPL, data[18] & BIT(2)); input_report_key(dev, BTN_GRIPL2, data[18] & BIT(3)); } else if (xpad->packet_type == PKT_XBE2_FW_5_EARLY) { /* Mute paddles if controller has a custom mapping applied. * Checked by comparing the current mapping * config against the factory mapping config */ if (data[23] != 0) data[22] = 0; /* Elite Series 2 5.x firmware paddle bits * (before the packet was split) */ input_report_key(dev, BTN_GRIPR, data[22] & BIT(0)); input_report_key(dev, BTN_GRIPR2, data[22] & BIT(1)); input_report_key(dev, BTN_GRIPL, data[22] & BIT(2)); input_report_key(dev, BTN_GRIPL2, data[22] & BIT(3)); } } do_sync = true; } if (do_sync) input_sync(dev); } static void xpad_irq_in(struct urb *urb) { struct usb_xpad *xpad = urb->context; struct device *dev = &xpad->intf->dev; int retval, status; status = urb->status; switch (status) { case 0: /* success */ break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status); return; default: dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status); goto exit; } switch (xpad->xtype) { case XTYPE_XBOX360: xpad360_process_packet(xpad, xpad->dev, 0, xpad->idata); break; case XTYPE_XBOX360W: xpad360w_process_packet(xpad, 0, xpad->idata); break; case XTYPE_XBOXONE: xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length); break; default: xpad_process_packet(xpad, 0, xpad->idata); } exit: retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, retval); } /* Callers must hold xpad->odata_lock spinlock */ static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad) { const struct xboxone_init_packet *init_packet; if (xpad->xtype != XTYPE_XBOXONE) return false; /* * Some dongles will discard init packets if they're sent before the * controller connects. In these cases, we need to wait until we get * an announce packet from them to send the init packet sequence. 
*/ if (xpad->delay_init && !xpad->delayed_init_done) return false; /* Perform initialization sequence for Xbox One pads that require it */ while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) { init_packet = &xboxone_init_packets[xpad->init_seq++]; if (init_packet->idVendor != 0 && init_packet->idVendor != xpad->dev->id.vendor) continue; if (init_packet->idProduct != 0 && init_packet->idProduct != xpad->dev->id.product) continue; /* This packet applies to our device, so prepare to send it */ memcpy(xpad->odata, init_packet->data, init_packet->len); xpad->irq_out->transfer_buffer_length = init_packet->len; /* Update packet with current sequence number */ xpad->odata[2] = xpad->odata_serial++; return true; } return false; } /* Callers must hold xpad->odata_lock spinlock */ static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad) { struct xpad_output_packet *pkt, *packet = NULL; int i; /* We may have init packets to send before we can send user commands */ if (xpad_prepare_next_init_packet(xpad)) return true; for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) { if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS) xpad->last_out_packet = 0; pkt = &xpad->out_packets[xpad->last_out_packet]; if (pkt->pending) { dev_dbg(&xpad->intf->dev, "%s - found pending output packet %d\n", __func__, xpad->last_out_packet); packet = pkt; break; } } if (packet) { memcpy(xpad->odata, packet->data, packet->len); xpad->irq_out->transfer_buffer_length = packet->len; packet->pending = false; return true; } return false; } /* Callers must hold xpad->odata_lock spinlock */ static int xpad_try_sending_next_out_packet(struct usb_xpad *xpad) { int error; if (!xpad->irq_out_active && xpad_prepare_next_out_packet(xpad)) { usb_anchor_urb(xpad->irq_out, &xpad->irq_out_anchor); error = usb_submit_urb(xpad->irq_out, GFP_ATOMIC); if (error) { if (error != -ENODEV) dev_err(&xpad->intf->dev, "%s - usb_submit_urb failed with result %d\n", __func__, error); usb_unanchor_urb(xpad->irq_out); return error; } xpad->irq_out_active = true; } return 0; } static void xpad_irq_out(struct urb *urb) { struct usb_xpad *xpad = urb->context; struct device *dev = &xpad->intf->dev; int status = urb->status; int error; guard(spinlock_irqsave)(&xpad->odata_lock); switch (status) { case 0: /* success */ xpad->irq_out_active = xpad_prepare_next_out_packet(xpad); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: /* this urb is terminated, clean up */ dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status); xpad->irq_out_active = false; break; default: dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status); break; } if (xpad->irq_out_active) { usb_anchor_urb(urb, &xpad->irq_out_anchor); error = usb_submit_urb(urb, GFP_ATOMIC); if (error) { dev_err(dev, "%s - usb_submit_urb failed with result %d\n", __func__, error); usb_unanchor_urb(urb); xpad->irq_out_active = false; } } } static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad, struct usb_endpoint_descriptor *ep_irq_out) { int error; if (xpad->xtype == XTYPE_UNKNOWN) return 0; init_usb_anchor(&xpad->irq_out_anchor); xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN, GFP_KERNEL, &xpad->odata_dma); if (!xpad->odata) return -ENOMEM; spin_lock_init(&xpad->odata_lock); xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL); if (!xpad->irq_out) { error = -ENOMEM; goto err_free_coherent; } usb_fill_int_urb(xpad->irq_out, xpad->udev, usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress), xpad->odata, XPAD_PKT_LEN, xpad_irq_out, xpad, 
ep_irq_out->bInterval); xpad->irq_out->transfer_dma = xpad->odata_dma; xpad->irq_out->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return 0; err_free_coherent: usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma); return error; } static void xpad_stop_output(struct usb_xpad *xpad) { if (xpad->xtype != XTYPE_UNKNOWN) { if (!usb_wait_anchor_empty_timeout(&xpad->irq_out_anchor, 5000)) { dev_warn(&xpad->intf->dev, "timed out waiting for output URB to complete, killing\n"); usb_kill_anchored_urbs(&xpad->irq_out_anchor); } } } static void xpad_deinit_output(struct usb_xpad *xpad) { if (xpad->xtype != XTYPE_UNKNOWN) { usb_free_urb(xpad->irq_out); usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma); } } static int xpad_inquiry_pad_presence(struct usb_xpad *xpad) { struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_CMD_IDX]; guard(spinlock_irqsave)(&xpad->odata_lock); packet->data[0] = 0x08; packet->data[1] = 0x00; packet->data[2] = 0x0F; packet->data[3] = 0xC0; packet->data[4] = 0x00; packet->data[5] = 0x00; packet->data[6] = 0x00; packet->data[7] = 0x00; packet->data[8] = 0x00; packet->data[9] = 0x00; packet->data[10] = 0x00; packet->data[11] = 0x00; packet->len = 12; packet->pending = true; /* Reset the sequence so we send out presence first */ xpad->last_out_packet = -1; return xpad_try_sending_next_out_packet(xpad); } static int xpad_start_xbox_one(struct usb_xpad *xpad) { int error; if (usb_ifnum_to_if(xpad->udev, GIP_WIRED_INTF_AUDIO)) { /* * Explicitly disable the audio interface. This is needed * for some controllers, such as the PowerA Enhanced Wired * Controller for Series X|S (0x20d6:0x200e) to report the * guide button. */ error = usb_set_interface(xpad->udev, GIP_WIRED_INTF_AUDIO, 0); if (error) dev_warn(&xpad->dev->dev, "unable to disable audio interface: %d\n", error); } guard(spinlock_irqsave)(&xpad->odata_lock); /* * Begin the init sequence by attempting to send a packet. * We will cycle through the init packet sequence before * sending any packets from the output ring. */ xpad->init_seq = 0; return xpad_try_sending_next_out_packet(xpad); } static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num) { struct xpad_output_packet *packet = &xpad->out_packets[XPAD_OUT_CM |